1 { 2 "calls: two calls returning different map pointers for lookup (hash, array)", 3 .insns = { 4 /* main prog */ 5 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), 6 BPF_CALL_REL(11), 7 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 8 BPF_CALL_REL(12), 9 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 10 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 11 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 12 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 13 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 14 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 15 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 16 BPF_MOV64_IMM(BPF_REG_0, 1), 17 BPF_EXIT_INSN(), 18 /* subprog 1 */ 19 BPF_LD_MAP_FD(BPF_REG_0, 0), 20 BPF_EXIT_INSN(), 21 /* subprog 2 */ 22 BPF_LD_MAP_FD(BPF_REG_0, 0), 23 BPF_EXIT_INSN(), 24 }, 25 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 26 .fixup_map_hash_48b = { 13 }, 27 .fixup_map_array_48b = { 16 }, 28 .result = ACCEPT, 29 .retval = 1, 30 }, 31 { 32 "calls: two calls returning different map pointers for lookup (hash, map in map)", 33 .insns = { 34 /* main prog */ 35 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), 36 BPF_CALL_REL(11), 37 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 38 BPF_CALL_REL(12), 39 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 40 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 41 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 42 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 43 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 44 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), 45 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), 46 BPF_MOV64_IMM(BPF_REG_0, 1), 47 BPF_EXIT_INSN(), 48 /* subprog 1 */ 49 BPF_LD_MAP_FD(BPF_REG_0, 0), 50 BPF_EXIT_INSN(), 51 /* subprog 2 */ 52 BPF_LD_MAP_FD(BPF_REG_0, 0), 53 BPF_EXIT_INSN(), 54 }, 55 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 56 .fixup_map_in_map = { 16 }, 57 .fixup_map_array_48b = { 13 }, 58 .result = REJECT, 59 .errstr = "only read from bpf_array is supported", 60 }, 61 { 62 "cond: two branches returning different map pointers for lookup (tail, tail)", 63 .insns = { 64 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 65 offsetof(struct __sk_buff, mark)), 66 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3), 67 BPF_LD_MAP_FD(BPF_REG_2, 0), 68 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 69 BPF_LD_MAP_FD(BPF_REG_2, 0), 70 BPF_MOV64_IMM(BPF_REG_3, 7), 71 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 72 BPF_MOV64_IMM(BPF_REG_0, 1), 73 BPF_EXIT_INSN(), 74 }, 75 .fixup_prog1 = { 5 }, 76 .fixup_prog2 = { 2 }, 77 .result_unpriv = REJECT, 78 .errstr_unpriv = "tail_call abusing map_ptr", 79 .result = ACCEPT, 80 .retval = 42, 81 }, 82 { 83 "cond: two branches returning same map pointers for lookup (tail, tail)", 84 .insns = { 85 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 86 offsetof(struct __sk_buff, mark)), 87 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3), 88 BPF_LD_MAP_FD(BPF_REG_2, 0), 89 BPF_JMP_IMM(BPF_JA, 0, 0, 2), 90 BPF_LD_MAP_FD(BPF_REG_2, 0), 91 BPF_MOV64_IMM(BPF_REG_3, 7), 92 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), 93 BPF_MOV64_IMM(BPF_REG_0, 1), 94 BPF_EXIT_INSN(), 95 }, 96 .fixup_prog2 = { 2, 5 }, 97 .result_unpriv = ACCEPT, 98 .result = ACCEPT, 99 .retval = 42, 100 }, 101