/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Common functionality for RV32 and RV64 BPF JIT compilers
 *
 * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 *
 */

#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <asm/cacheflush.h>

static inline bool rvc_enabled(void)
{
	return IS_ENABLED(CONFIG_RISCV_ISA_C);
}

enum {
	RV_REG_ZERO = 0,	/* The constant value 0 */
	RV_REG_RA = 1,		/* Return address */
	RV_REG_SP = 2,		/* Stack pointer */
	RV_REG_GP = 3,		/* Global pointer */
	RV_REG_TP = 4,		/* Thread pointer */
	RV_REG_T0 = 5,		/* Temporaries */
	RV_REG_T1 = 6,
	RV_REG_T2 = 7,
	RV_REG_FP = 8,		/* Saved register/frame pointer */
	RV_REG_S1 = 9,		/* Saved register */
	RV_REG_A0 = 10,		/* Function argument/return values */
	RV_REG_A1 = 11,		/* Function arguments */
	RV_REG_A2 = 12,
	RV_REG_A3 = 13,
	RV_REG_A4 = 14,
	RV_REG_A5 = 15,
	RV_REG_A6 = 16,
	RV_REG_A7 = 17,
	RV_REG_S2 = 18,		/* Saved registers */
	RV_REG_S3 = 19,
	RV_REG_S4 = 20,
	RV_REG_S5 = 21,
	RV_REG_S6 = 22,
	RV_REG_S7 = 23,
	RV_REG_S8 = 24,
	RV_REG_S9 = 25,
	RV_REG_S10 = 26,
	RV_REG_S11 = 27,
	RV_REG_T3 = 28,		/* Temporaries */
	RV_REG_T4 = 29,
	RV_REG_T5 = 30,
	RV_REG_T6 = 31,
};

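/*
 * The registers accepted by is_creg() below (x8-x15: fp/s0, s1, a0-a5) are
 * the only ones encodable in the 3-bit rd'/rs1'/rs2' fields of the
 * compressed (RVC) instruction formats.
 */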
static inline bool is_creg(u8 reg)
{
	return (1 << reg) & (BIT(RV_REG_FP) |
			     BIT(RV_REG_S1) |
			     BIT(RV_REG_A0) |
			     BIT(RV_REG_A1) |
			     BIT(RV_REG_A2) |
			     BIT(RV_REG_A3) |
			     BIT(RV_REG_A4) |
			     BIT(RV_REG_A5));
}

struct rv_jit_context {
	struct bpf_prog *prog;
	u16 *insns;		/* RV insns */
	int ninsns;
	int epilogue_offset;
	int *offset;		/* BPF to RV */
	int nexentries;
	unsigned long flags;
	int stack_size;
};

/* Convert from ninsns to bytes. */
static inline int ninsns_rvoff(int ninsns)
{
	return ninsns << 1;
}

struct rv_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct rv_jit_context ctx;
};

static inline void bpf_fill_ill_insns(void *area, unsigned int size)
{
	memset(area, 0, size);
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

/* Emit a 4-byte riscv instruction. */
static inline void emit(const u32 insn, struct rv_jit_context *ctx)
{
	if (ctx->insns) {
		ctx->insns[ctx->ninsns] = insn;
		ctx->insns[ctx->ninsns + 1] = (insn >> 16);
	}

	ctx->ninsns += 2;
}

/* Emit a 2-byte riscv compressed instruction. */
static inline void emitc(const u16 insn, struct rv_jit_context *ctx)
{
	BUILD_BUG_ON(!rvc_enabled());

	if (ctx->insns)
		ctx->insns[ctx->ninsns] = insn;

	ctx->ninsns++;
}
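
/*
 * Note on emit()/emitc(): ctx->insns is an array of 16-bit halfwords and
 * ctx->ninsns counts halfwords, not instructions. A 32-bit instruction is
 * stored low halfword first, matching RISC-V's little-endian 16-bit
 * instruction parcels; this is also why ninsns_rvoff() converts to bytes
 * with a shift by one.
 */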

static inline int epilogue_offset(struct rv_jit_context *ctx)
{
	int to = ctx->epilogue_offset, from = ctx->ninsns;

	return ninsns_rvoff(to - from);
}

/* Return -1 or inverted cond. */
static inline int invert_bpf_cond(u8 cond)
{
	switch (cond) {
	case BPF_JEQ:
		return BPF_JNE;
	case BPF_JGT:
		return BPF_JLE;
	case BPF_JLT:
		return BPF_JGE;
	case BPF_JGE:
		return BPF_JLT;
	case BPF_JLE:
		return BPF_JGT;
	case BPF_JNE:
		return BPF_JEQ;
	case BPF_JSGT:
		return BPF_JSLE;
	case BPF_JSLT:
		return BPF_JSGE;
	case BPF_JSGE:
		return BPF_JSLT;
	case BPF_JSLE:
		return BPF_JSGT;
	}
	return -1;
}

static inline bool is_6b_int(long val)
{
	return -(1L << 5) <= val && val < (1L << 5);
}

static inline bool is_7b_uint(unsigned long val)
{
	return val < (1UL << 7);
}

static inline bool is_8b_uint(unsigned long val)
{
	return val < (1UL << 8);
}

static inline bool is_9b_uint(unsigned long val)
{
	return val < (1UL << 9);
}

static inline bool is_10b_int(long val)
{
	return -(1L << 9) <= val && val < (1L << 9);
}

static inline bool is_10b_uint(unsigned long val)
{
	return val < (1UL << 10);
}

static inline bool is_12b_int(long val)
{
	return -(1L << 11) <= val && val < (1L << 11);
}

static inline int is_12b_check(int off, int insn)
{
	if (!is_12b_int(off)) {
		pr_err("bpf-jit: insn=%d 12b < offset=%d not supported yet!\n",
		       insn, (int)off);
		return -1;
	}
	return 0;
}

static inline bool is_13b_int(long val)
{
	return -(1L << 12) <= val && val < (1L << 12);
}

static inline bool is_21b_int(long val)
{
	return -(1L << 20) <= val && val < (1L << 20);
}

static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
{
	int from, to;

	off++; /* BPF branch is from PC+1, RV is from PC */
	from = (insn > 0) ? ctx->offset[insn - 1] : 0;
	to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
	return ninsns_rvoff(to - from);
}

/* Instruction formats. */

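/*
 * For reference, the bit layout of the 32-bit base formats produced by the
 * helpers below, per the RISC-V unprivileged ISA specification:
 *
 *  R-type: funct7[31:25] rs2[24:20] rs1[19:15] funct3[14:12] rd[11:7] opcode[6:0]
 *  I-type: imm[11:0] in [31:20]     rs1[19:15] funct3[14:12] rd[11:7] opcode[6:0]
 *  S-type: imm[11:5][31:25] rs2[24:20] rs1[19:15] funct3[14:12] imm[4:0][11:7] opcode[6:0]
 *  B-type: imm[12|10:5][31:25] rs2[24:20] rs1[19:15] funct3[14:12] imm[4:1|11][11:7] opcode[6:0]
 *  U-type: imm[31:12] in [31:12] rd[11:7] opcode[6:0]
 *  J-type: imm[20|10:1|11|19:12] in [31:12] rd[11:7] opcode[6:0]
 */
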
static inline u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd,
			    u8 opcode)
{
	return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
		(rd << 7) | opcode;
}

static inline u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode)
{
	return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) |
		opcode;
}

static inline u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
{
	u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f;

	return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
		(imm4_0 << 7) | opcode;
}

static inline u32 rv_b_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
{
	u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4);
	u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10);

	return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
		(imm4_1 << 7) | opcode;
}

static inline u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode)
{
	return (imm31_12 << 12) | (rd << 7) | opcode;
}

static inline u32 rv_j_insn(u32 imm20_1, u8 rd, u8 opcode)
{
	u32 imm;

	imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) |
	      ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11);

	return (imm << 12) | (rd << 7) | opcode;
}

static inline u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
			      u8 funct3, u8 rd, u8 opcode)
{
	u8 funct7 = (funct5 << 2) | (aq << 1) | rl;

	return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
}

/* RISC-V compressed instruction formats. */

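/*
 * For reference, the bit layout of the 16-bit compressed formats built
 * below (RISC-V "C" extension). rd'/rs1'/rs2' denote the 3-bit compressed
 * register numbers covering x8-x15 (see is_creg()):
 *
 *  CR:  funct4[15:12] rd/rs1[11:7] rs2[6:2] op[1:0]
 *  CI:  funct3[15:13] imm[12] rd/rs1[11:7] imm[6:2] op[1:0]
 *  CSS: funct3[15:13] imm[12:7] rs2[6:2] op[1:0]
 *  CIW: funct3[15:13] imm[12:5] rd'[4:2] op[1:0]
 *  CL:  funct3[15:13] imm[12:10] rs1'[9:7] imm[6:5] rd'[4:2] op[1:0]
 *  CS:  funct3[15:13] imm[12:10] rs1'[9:7] imm[6:5] rs2'[4:2] op[1:0]
 *  CA:  funct6[15:10] rd'/rs1'[9:7] funct2[6:5] rs2'[4:2] op[1:0]
 *  CB:  funct3[15:13] imm[12] funct2[11:10] rd'/rs1'[9:7] imm[6:2] op[1:0]
 *       (the CB variant used here by c.srli/c.srai/c.andi)
 */
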
static inline u16 rv_cr_insn(u8 funct4, u8 rd, u8 rs2, u8 op)
{
	return (funct4 << 12) | (rd << 7) | (rs2 << 2) | op;
}

static inline u16 rv_ci_insn(u8 funct3, u32 imm6, u8 rd, u8 op)
{
	u32 imm;

	imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2);
	return (funct3 << 13) | (rd << 7) | op | imm;
}

static inline u16 rv_css_insn(u8 funct3, u32 uimm, u8 rs2, u8 op)
{
	return (funct3 << 13) | (uimm << 7) | (rs2 << 2) | op;
}

static inline u16 rv_ciw_insn(u8 funct3, u32 uimm, u8 rd, u8 op)
{
	return (funct3 << 13) | (uimm << 5) | ((rd & 0x7) << 2) | op;
}

static inline u16 rv_cl_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rd,
			     u8 op)
{
	return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) |
		(imm_lo << 5) | ((rd & 0x7) << 2) | op;
}

static inline u16 rv_cs_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rs2,
			     u8 op)
{
	return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) |
		(imm_lo << 5) | ((rs2 & 0x7) << 2) | op;
}

static inline u16 rv_ca_insn(u8 funct6, u8 rd, u8 funct2, u8 rs2, u8 op)
{
	return (funct6 << 10) | ((rd & 0x7) << 7) | (funct2 << 5) |
		((rs2 & 0x7) << 2) | op;
}

static inline u16 rv_cb_insn(u8 funct3, u32 imm6, u8 funct2, u8 rd, u8 op)
{
	u32 imm;

	imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2);
	return (funct3 << 13) | (funct2 << 10) | ((rd & 0x7) << 7) | op | imm;
}

/* Instructions shared by both RV32 and RV64. */

static inline u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 0, rd, 0x13);
}

static inline u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 7, rd, 0x13);
}

static inline u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 6, rd, 0x13);
}

static inline u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 4, rd, 0x13);
}

static inline u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 1, rd, 0x13);
}

static inline u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 5, rd, 0x13);
}

static inline u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13);
}

static inline u32 rv_lui(u8 rd, u32 imm31_12)
{
	return rv_u_insn(imm31_12, rd, 0x37);
}

static inline u32 rv_auipc(u8 rd, u32 imm31_12)
{
	return rv_u_insn(imm31_12, rd, 0x17);
}

static inline u32 rv_add(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 0, rd, 0x33);
}

static inline u32 rv_sub(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33);
}

static inline u32 rv_sltu(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 3, rd, 0x33);
}

static inline u32 rv_and(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 7, rd, 0x33);
}

static inline u32 rv_or(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 6, rd, 0x33);
}

static inline u32 rv_xor(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 4, rd, 0x33);
}

static inline u32 rv_sll(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 1, rd, 0x33);
}

static inline u32 rv_srl(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 5, rd, 0x33);
}

static inline u32 rv_sra(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33);
}

static inline u32 rv_mul(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 0, rd, 0x33);
}

static inline u32 rv_mulhu(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 3, rd, 0x33);
}

static inline u32 rv_divu(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 5, rd, 0x33);
}

static inline u32 rv_remu(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 7, rd, 0x33);
}

static inline u32 rv_jal(u8 rd, u32 imm20_1)
{
	return rv_j_insn(imm20_1, rd, 0x6f);
}

static inline u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 0, rd, 0x67);
}

static inline u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_b_insn(imm12_1, rs2, rs1, 0, 0x63);
}

static inline u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_b_insn(imm12_1, rs2, rs1, 1, 0x63);
}

static inline u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_b_insn(imm12_1, rs2, rs1, 6, 0x63);
}

static inline u32 rv_bgtu(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_bltu(rs2, rs1, imm12_1);
}

static inline u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_b_insn(imm12_1, rs2, rs1, 7, 0x63);
}

static inline u32 rv_bleu(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_bgeu(rs2, rs1, imm12_1);
}

static inline u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_b_insn(imm12_1, rs2, rs1, 4, 0x63);
}

static inline u32 rv_bgt(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_blt(rs2, rs1, imm12_1);
}

static inline u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_b_insn(imm12_1, rs2, rs1, 5, 0x63);
}

static inline u32 rv_ble(u8 rs1, u8 rs2, u16 imm12_1)
{
	return rv_bge(rs2, rs1, imm12_1);
}

static inline u32 rv_lw(u8 rd, u16 imm11_0, u8 rs1)
{
	return rv_i_insn(imm11_0, rs1, 2, rd, 0x03);
}

static inline u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1)
{
	return rv_i_insn(imm11_0, rs1, 4, rd, 0x03);
}

static inline u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1)
{
	return rv_i_insn(imm11_0, rs1, 5, rd, 0x03);
}

static inline u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2)
{
	return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23);
}

static inline u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2)
{
	return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23);
}

static inline u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2)
{
	return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23);
}

static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
{
	return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
}

/* RVC instructions. */

static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
{
	u32 imm;

	imm = ((imm10 & 0x30) << 2) | ((imm10 & 0x3c0) >> 4) |
	      ((imm10 & 0x4) >> 1) | ((imm10 & 0x8) >> 3);
	return rv_ciw_insn(0x0, imm, rd, 0x0);
}

static inline u16 rvc_lw(u8 rd, u32 imm7, u8 rs1)
{
	u32 imm_hi, imm_lo;

	imm_hi = (imm7 & 0x38) >> 3;
	imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6);
	return rv_cl_insn(0x2, imm_hi, rs1, imm_lo, rd, 0x0);
}

static inline u16 rvc_sw(u8 rs1, u32 imm7, u8 rs2)
{
	u32 imm_hi, imm_lo;

	imm_hi = (imm7 & 0x38) >> 3;
	imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6);
	return rv_cs_insn(0x6, imm_hi, rs1, imm_lo, rs2, 0x0);
}

static inline u16 rvc_addi(u8 rd, u32 imm6)
{
	return rv_ci_insn(0, imm6, rd, 0x1);
}

static inline u16 rvc_li(u8 rd, u32 imm6)
{
	return rv_ci_insn(0x2, imm6, rd, 0x1);
}

static inline u16 rvc_addi16sp(u32 imm10)
{
	u32 imm;

	imm = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) | ((imm10 & 0x40) >> 3) |
	      ((imm10 & 0x180) >> 6) | ((imm10 & 0x20) >> 5);
	return rv_ci_insn(0x3, imm, RV_REG_SP, 0x1);
}

static inline u16 rvc_lui(u8 rd, u32 imm6)
{
	return rv_ci_insn(0x3, imm6, rd, 0x1);
}

static inline u16 rvc_srli(u8 rd, u32 imm6)
{
	return rv_cb_insn(0x4, imm6, 0, rd, 0x1);
}

static inline u16 rvc_srai(u8 rd, u32 imm6)
{
	return rv_cb_insn(0x4, imm6, 0x1, rd, 0x1);
}

static inline u16 rvc_andi(u8 rd, u32 imm6)
{
	return rv_cb_insn(0x4, imm6, 0x2, rd, 0x1);
}

static inline u16 rvc_sub(u8 rd, u8 rs)
{
	return rv_ca_insn(0x23, rd, 0, rs, 0x1);
}

static inline u16 rvc_xor(u8 rd, u8 rs)
{
	return rv_ca_insn(0x23, rd, 0x1, rs, 0x1);
}

static inline u16 rvc_or(u8 rd, u8 rs)
{
	return rv_ca_insn(0x23, rd, 0x2, rs, 0x1);
}

static inline u16 rvc_and(u8 rd, u8 rs)
{
	return rv_ca_insn(0x23, rd, 0x3, rs, 0x1);
}

static inline u16 rvc_slli(u8 rd, u32 imm6)
{
	return rv_ci_insn(0, imm6, rd, 0x2);
}

static inline u16 rvc_lwsp(u8 rd, u32 imm8)
{
	u32 imm;

	imm = ((imm8 & 0xc0) >> 6) | (imm8 & 0x3c);
	return rv_ci_insn(0x2, imm, rd, 0x2);
}

static inline u16 rvc_jr(u8 rs1)
{
	return rv_cr_insn(0x8, rs1, RV_REG_ZERO, 0x2);
}

static inline u16 rvc_mv(u8 rd, u8 rs)
{
	return rv_cr_insn(0x8, rd, rs, 0x2);
}

static inline u16 rvc_jalr(u8 rs1)
{
	return rv_cr_insn(0x9, rs1, RV_REG_ZERO, 0x2);
}

static inline u16 rvc_add(u8 rd, u8 rs)
{
	return rv_cr_insn(0x9, rd, rs, 0x2);
}

static inline u16 rvc_swsp(u32 imm8, u8 rs2)
{
	u32 imm;

	imm = (imm8 & 0x3c) | ((imm8 & 0xc0) >> 6);
	return rv_css_insn(0x6, imm, rs2, 0x2);
}

/*
 * RV64-only instructions.
 *
 * These instructions are not available on RV32. Wrap them below a #if to
 * ensure that the RV32 JIT doesn't emit any of these instructions.
 */

#if __riscv_xlen == 64

static inline u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b);
}

static inline u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b);
}

static inline u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b);
}

static inline u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0)
{
	return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b);
}

static inline u32 rv_addw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b);
}

static inline u32 rv_subw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b);
}

static inline u32 rv_sllw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b);
}

static inline u32 rv_srlw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b);
}

static inline u32 rv_sraw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b);
}

static inline u32 rv_mulw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b);
}

static inline u32 rv_divuw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b);
}

static inline u32 rv_remuw(u8 rd, u8 rs1, u8 rs2)
{
	return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b);
}

static inline u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1)
{
	return rv_i_insn(imm11_0, rs1, 3, rd, 0x03);
}

static inline u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1)
{
	return rv_i_insn(imm11_0, rs1, 6, rd, 0x03);
}

static inline u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2)
{
	return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23);
}

static inline u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
{
	return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
}

/* RV64-only RVC instructions. */

static inline u16 rvc_ld(u8 rd, u32 imm8, u8 rs1)
{
	u32 imm_hi, imm_lo;

	imm_hi = (imm8 & 0x38) >> 3;
	imm_lo = (imm8 & 0xc0) >> 6;
	return rv_cl_insn(0x3, imm_hi, rs1, imm_lo, rd, 0x0);
}

static inline u16 rvc_sd(u8 rs1, u32 imm8, u8 rs2)
{
	u32 imm_hi, imm_lo;

	imm_hi = (imm8 & 0x38) >> 3;
	imm_lo = (imm8 & 0xc0) >> 6;
	return rv_cs_insn(0x7, imm_hi, rs1, imm_lo, rs2, 0x0);
}

static inline u16 rvc_subw(u8 rd, u8 rs)
{
	return rv_ca_insn(0x27, rd, 0, rs, 0x1);
}

static inline u16 rvc_addiw(u8 rd, u32 imm6)
{
	return rv_ci_insn(0x1, imm6, rd, 0x1);
}

static inline u16 rvc_ldsp(u8 rd, u32 imm9)
{
	u32 imm;

	imm = ((imm9 & 0x1c0) >> 6) | (imm9 & 0x38);
	return rv_ci_insn(0x3, imm, rd, 0x2);
}

static inline u16 rvc_sdsp(u32 imm9, u8 rs2)
{
	u32 imm;

	imm = (imm9 & 0x38) | ((imm9 & 0x1c0) >> 6);
	return rv_css_insn(0x7, imm, rs2, 0x2);
}

#endif /* __riscv_xlen == 64 */

/* Helper functions that emit RVC instructions when possible. */
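/*
 * Each helper checks, in addition to rvc_enabled(), the operand and
 * immediate constraints of the corresponding compressed encoding (nonzero
 * registers, immediate range, alignment) and falls back to the full 4-byte
 * instruction otherwise. Since emitc() contains a
 * BUILD_BUG_ON(!rvc_enabled()), the compressed paths must only be reachable
 * when CONFIG_RISCV_ISA_C is enabled, and are compiled out otherwise.
 */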

static inline void emit_jalr(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rd == RV_REG_RA && rs && !imm)
		emitc(rvc_jalr(rs), ctx);
	else if (rvc_enabled() && !rd && rs && !imm)
		emitc(rvc_jr(rs), ctx);
	else
		emit(rv_jalr(rd, rs, imm), ctx);
}

static inline void emit_mv(u8 rd, u8 rs, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rd && rs)
		emitc(rvc_mv(rd, rs), ctx);
	else
		emit(rv_addi(rd, rs, 0), ctx);
}

static inline void emit_add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rd && rd == rs1 && rs2)
		emitc(rvc_add(rd, rs2), ctx);
	else
		emit(rv_add(rd, rs1, rs2), ctx);
}

static inline void emit_addi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rd == RV_REG_SP && rd == rs && is_10b_int(imm) && imm && !(imm & 0xf))
		emitc(rvc_addi16sp(imm), ctx);
	else if (rvc_enabled() && is_creg(rd) && rs == RV_REG_SP && is_10b_uint(imm) &&
		 !(imm & 0x3) && imm)
		emitc(rvc_addi4spn(rd, imm), ctx);
	else if (rvc_enabled() && rd && rd == rs && imm && is_6b_int(imm))
		emitc(rvc_addi(rd, imm), ctx);
	else
		emit(rv_addi(rd, rs, imm), ctx);
}
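
/*
 * Example: with RVC enabled, emit_addi(RV_REG_SP, RV_REG_SP, -16, ctx)
 * satisfies the first branch above and emits a single c.addi16sp.
 */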

static inline void emit_li(u8 rd, s32 imm, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rd && is_6b_int(imm))
		emitc(rvc_li(rd, imm), ctx);
	else
		emit(rv_addi(rd, RV_REG_ZERO, imm), ctx);
}

static inline void emit_lui(u8 rd, s32 imm, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rd && rd != RV_REG_SP && is_6b_int(imm) && imm)
		emitc(rvc_lui(rd, imm), ctx);
	else
		emit(rv_lui(rd, imm), ctx);
}

static inline void emit_slli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rd && rd == rs && imm && (u32)imm < __riscv_xlen)
		emitc(rvc_slli(rd, imm), ctx);
	else
		emit(rv_slli(rd, rs, imm), ctx);
}

static inline void emit_andi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && is_creg(rd) && rd == rs && is_6b_int(imm))
		emitc(rvc_andi(rd, imm), ctx);
	else
		emit(rv_andi(rd, rs, imm), ctx);
}

static inline void emit_srli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen)
		emitc(rvc_srli(rd, imm), ctx);
	else
		emit(rv_srli(rd, rs, imm), ctx);
}

static inline void emit_srai(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen)
		emitc(rvc_srai(rd, imm), ctx);
	else
		emit(rv_srai(rd, rs, imm), ctx);
}

static inline void emit_sub(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
		emitc(rvc_sub(rd, rs2), ctx);
	else
		emit(rv_sub(rd, rs1, rs2), ctx);
}

static inline void emit_or(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
		emitc(rvc_or(rd, rs2), ctx);
	else
		emit(rv_or(rd, rs1, rs2), ctx);
}

static inline void emit_and(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
		emitc(rvc_and(rd, rs2), ctx);
	else
		emit(rv_and(rd, rs1, rs2), ctx);
}

static inline void emit_xor(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
		emitc(rvc_xor(rd, rs2), ctx);
	else
		emit(rv_xor(rd, rs1, rs2), ctx);
}

static inline void emit_lw(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_8b_uint(off) && !(off & 0x3))
		emitc(rvc_lwsp(rd, off), ctx);
	else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_7b_uint(off) && !(off & 0x3))
		emitc(rvc_lw(rd, off, rs1), ctx);
	else
		emit(rv_lw(rd, off, rs1), ctx);
}

static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rs1 == RV_REG_SP && is_8b_uint(off) && !(off & 0x3))
		emitc(rvc_swsp(off, rs2), ctx);
	else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_7b_uint(off) && !(off & 0x3))
		emitc(rvc_sw(rs1, off, rs2), ctx);
	else
		emit(rv_sw(rs1, off, rs2), ctx);
}

/* RV64-only helper functions. */
#if __riscv_xlen == 64

static inline void emit_addiw(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rd && rd == rs && is_6b_int(imm))
		emitc(rvc_addiw(rd, imm), ctx);
	else
		emit(rv_addiw(rd, rs, imm), ctx);
}

static inline void emit_ld(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_9b_uint(off) && !(off & 0x7))
		emitc(rvc_ldsp(rd, off), ctx);
	else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_8b_uint(off) && !(off & 0x7))
		emitc(rvc_ld(rd, off, rs1), ctx);
	else
		emit(rv_ld(rd, off, rs1), ctx);
}

static inline void emit_sd(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && rs1 == RV_REG_SP && is_9b_uint(off) && !(off & 0x7))
		emitc(rvc_sdsp(off, rs2), ctx);
	else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_8b_uint(off) && !(off & 0x7))
		emitc(rvc_sd(rs1, off, rs2), ctx);
	else
		emit(rv_sd(rs1, off, rs2), ctx);
}

static inline void emit_subw(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
{
	if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
		emitc(rvc_subw(rd, rs2), ctx);
	else
		emit(rv_subw(rd, rs1, rs2), ctx);
}

#endif /* __riscv_xlen == 64 */

void bpf_jit_build_prologue(struct rv_jit_context *ctx);
void bpf_jit_build_epilogue(struct rv_jit_context *ctx);

int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
		      bool extra_pass);

#endif /* _BPF_JIT_H */