1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
3 #include <network_helpers.h>
4 #include <error.h>
5 #include <linux/if.h>
6 #include <linux/if_tun.h>
7 #include <sys/uio.h>
8 
9 #include "bpf_flow.skel.h"
10 
11 #ifndef IP_MF
12 #define IP_MF 0x2000
13 #endif
14 
/* Compare a dissected struct bpf_flow_keys against the expected one and
 * report every field as "got/expected" on mismatch.
 * NOTE: 'got' and 'expected' are macro arguments and are evaluated many
 * times — pass only plain lvalues, never expressions with side effects.
 */
#define CHECK_FLOW_KEYS(desc, got, expected)				\
	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,		\
	      desc,							\
	      "nhoff=%u/%u "						\
	      "thoff=%u/%u "						\
	      "addr_proto=0x%x/0x%x "					\
	      "is_frag=%u/%u "						\
	      "is_first_frag=%u/%u "					\
	      "is_encap=%u/%u "						\
	      "ip_proto=0x%x/0x%x "					\
	      "n_proto=0x%x/0x%x "					\
	      "flow_label=0x%x/0x%x "					\
	      "sport=%u/%u "						\
	      "dport=%u/%u\n",						\
	      got.nhoff, expected.nhoff,				\
	      got.thoff, expected.thoff,				\
	      got.addr_proto, expected.addr_proto,			\
	      got.is_frag, expected.is_frag,				\
	      got.is_first_frag, expected.is_first_frag,		\
	      got.is_encap, expected.is_encap,				\
	      got.ip_proto, expected.ip_proto,				\
	      got.n_proto, expected.n_proto,				\
	      got.flow_label, expected.flow_label,			\
	      got.sport, expected.sport,				\
	      got.dport, expected.dport)
40 
/* Plain Ethernet + IPv4 + TCP test packet, laid out as on the wire. */
struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;
46 
/* IPIP-encapsulated packet: outer IPv4 header carrying an inner IPv4+TCP. */
struct ipip_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct iphdr iph_inner;
	struct tcphdr tcp;
} __packed;
53 
/* Single-tagged (802.1Q) IPv4+TCP packet: eth.h_proto holds the VLAN
 * ethertype, vlan_proto holds the encapsulated protocol.
 */
struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;
61 
/* Plain Ethernet + IPv6 + TCP test packet. */
struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;
67 
/* IPv6 packet with a fragment extension header (used with
 * iph.nexthdr == IPPROTO_FRAGMENT) followed by TCP.
 */
struct ipv6_frag_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	/* IPv6 fragment extension header layout. */
	struct frag_hdr {
		__u8 nexthdr;
		__u8 reserved;
		__be16 frag_off;
		__be32 identification;
	} ipf;
	struct tcphdr tcp;
} __packed;
79 
/* Double-tagged (802.1AD outer + 802.1Q inner) IPv6+TCP packet. */
struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;
89 
/* One flow dissector test case. */
struct test {
	const char *name;	/* case name, printed on failure */
	/* Raw input packet; exactly one union member is populated. */
	union {
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipip_pkt ipip;
		struct ipv6_pkt ipv6;
		struct ipv6_frag_pkt ipv6_frag;
		struct dvlan_ipv6_pkt dvlan_ipv6;
	} pkt;
	struct bpf_flow_keys keys;	/* expected dissector output */
	__u32 flags;	/* input flags passed via ctx (see test_flow_dissector) */
};
103 
#define VLAN_HLEN	4

/* 'duration' is referenced by the CHECK* macros — presumably declared
 * this way for test_progs.h; verify against that header.
 */
static __u32 duration;

/* Table of packets and the bpf_flow_keys each one is expected to produce.
 * Note the two flags fields: '.keys.flags' is the value expected back in
 * the dissected keys, while the outer '.flags' is the input flag set fed
 * to the dissector via the run context.
 */
struct test tests[] = {
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			/* Offsets shift by one VLAN tag. */
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			/* Offsets shift by two VLAN tags. */
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipv4-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
	},
	{
		/* Same packet as above, but without PARSE_1ST_FRAG the
		 * expected keys carry no ports (sport/dport stay 0).
		 */
		.name = "ipv4-no-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
		},
	},
	{
		.name = "ipv6-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
	},
	{
		.name = "ipv6-no-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
		},
	},
	{
		.name = "ipv6-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			/* flow_lbl bytes 0x0b 0xee 0xef == label 0xbeeef */
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
	},
	{
		/* With STOP_AT_FLOW_LABEL no ports are expected back. */
		.name = "ipv6-no-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
	},
	{
		.name = "ipip-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			/* NOTE(review): the subtraction happens AFTER htons,
			 * which is byte-order suspicious; likely intended
			 * __bpf_constant_htons(MAGIC_BYTES - sizeof(struct
			 * iphdr)). Harmless if the dissector ignores the
			 * inner tot_len — verify before changing.
			 */
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr) +
				sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		/* With STOP_AT_ENCAP, dissection ends at the outer header. */
		.name = "ipip-no-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			/* NOTE(review): same htons-then-subtract concern as
			 * in "ipip-encap" above.
			 */
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_IPIP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
	},
};
390 
/* Create a TAP device named @ifname with NAPI frag support enabled.
 * Returns the open tun fd (caller closes it), or -1 on failure.
 */
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	/* ifr is zero-initialized, so bounding the copy to size-1 keeps
	 * ifr_name NUL-terminated even for an over-long name.
	 */
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		/* Don't leak the tun fd when device setup fails. */
		close(fd);
		return -1;
	}

	return fd;
}
410 
/* Transmit one packet on @fd via a single-element writev.
 * Returns the writev result: bytes written, or -1 on error.
 */
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov = {
		.iov_base = pkt,
		.iov_len = len,
	};

	return writev(fd, &iov, 1);
}
421 
/* Bring interface @ifname up (read flags, OR in IFF_UP, write back).
 * Returns 0 on success, -1 on failure.
 */
static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int ret = -1;
	int sk;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	if (ioctl(sk, SIOCGIFFLAGS, &ifr))
		goto out;

	ifr.ifr_flags |= IFF_UP;
	if (ioctl(sk, SIOCSIFFLAGS, &ifr))
		goto out;

	ret = 0;
out:
	close(sk);
	return ret;
}
449 
init_prog_array(struct bpf_object * obj,struct bpf_map * prog_array)450 static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
451 {
452 	int i, err, map_fd, prog_fd;
453 	struct bpf_program *prog;
454 	char prog_name[32];
455 
456 	map_fd = bpf_map__fd(prog_array);
457 	if (map_fd < 0)
458 		return -1;
459 
460 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
461 		snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
462 
463 		prog = bpf_object__find_program_by_name(obj, prog_name);
464 		if (!prog)
465 			return -1;
466 
467 		prog_fd = bpf_program__fd(prog);
468 		if (prog_fd < 0)
469 			return -1;
470 
471 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
472 		if (err)
473 			return -1;
474 	}
475 	return 0;
476 }
477 
run_tests_skb_less(int tap_fd,struct bpf_map * keys)478 static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
479 {
480 	int i, err, keys_fd;
481 
482 	keys_fd = bpf_map__fd(keys);
483 	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
484 		return;
485 
486 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
487 		/* Keep in sync with 'flags' from eth_get_headlen. */
488 		__u32 eth_get_headlen_flags =
489 			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
490 		struct bpf_prog_test_run_attr tattr = {};
491 		struct bpf_flow_keys flow_keys = {};
492 		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
493 			    tests[i].keys.dport;
494 
495 		/* For skb-less case we can't pass input flags; run
496 		 * only the tests that have a matching set of flags.
497 		 */
498 
499 		if (tests[i].flags != eth_get_headlen_flags)
500 			continue;
501 
502 		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
503 		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
504 
505 		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
506 		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
507 
508 		CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
509 		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
510 
511 		err = bpf_map_delete_elem(keys_fd, &key);
512 		CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
513 	}
514 }
515 
/* Exercise the skb-less path with the dissector attached directly via
 * bpf_prog_attach(); detaches again before returning.
 */
static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
{
	int err, prog_fd;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
		return;

	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
		return;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	/* Detach so the next attachment mode starts from a clean state. */
	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
}
533 
/* Exercise the skb-less path with the dissector attached to the current
 * network namespace via a bpf_link; destroys the link before returning.
 */
static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
{
	struct bpf_link *link;
	int err, net_fd;

	/* Attach to our own netns. */
	net_fd = open("/proc/self/ns/net", O_RDONLY);
	if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
		return;

	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
	if (!ASSERT_OK_PTR(link, "attach_netns"))
		goto out_close;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	err = bpf_link__destroy(link);
	CHECK(err, "bpf_link__destroy", "err %d\n", err);
out_close:
	close(net_fd);
}
554 
test_flow_dissector(void)555 void test_flow_dissector(void)
556 {
557 	int i, err, prog_fd, keys_fd = -1, tap_fd;
558 	struct bpf_flow *skel;
559 
560 	skel = bpf_flow__open_and_load();
561 	if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
562 		return;
563 
564 	prog_fd = bpf_program__fd(skel->progs._dissect);
565 	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
566 		goto out_destroy_skel;
567 	keys_fd = bpf_map__fd(skel->maps.last_dissection);
568 	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
569 		goto out_destroy_skel;
570 	err = init_prog_array(skel->obj, skel->maps.jmp_table);
571 	if (CHECK(err, "init_prog_array", "err %d\n", err))
572 		goto out_destroy_skel;
573 
574 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
575 		struct bpf_flow_keys flow_keys;
576 		struct bpf_prog_test_run_attr tattr = {
577 			.prog_fd = prog_fd,
578 			.data_in = &tests[i].pkt,
579 			.data_size_in = sizeof(tests[i].pkt),
580 			.data_out = &flow_keys,
581 		};
582 		static struct bpf_flow_keys ctx = {};
583 
584 		if (tests[i].flags) {
585 			tattr.ctx_in = &ctx;
586 			tattr.ctx_size_in = sizeof(ctx);
587 			ctx.flags = tests[i].flags;
588 		}
589 
590 		err = bpf_prog_test_run_xattr(&tattr);
591 		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
592 			   err || tattr.retval != 1,
593 			   tests[i].name,
594 			   "err %d errno %d retval %d duration %d size %u/%zu\n",
595 			   err, errno, tattr.retval, tattr.duration,
596 			   tattr.data_size_out, sizeof(flow_keys));
597 		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
598 	}
599 
600 	/* Do the same tests but for skb-less flow dissector.
601 	 * We use a known path in the net/tun driver that calls
602 	 * eth_get_headlen and we manually export bpf_flow_keys
603 	 * via BPF map in this case.
604 	 */
605 
606 	tap_fd = create_tap("tap0");
607 	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
608 	err = ifup("tap0");
609 	CHECK(err, "ifup", "err %d errno %d\n", err, errno);
610 
611 	/* Test direct prog attachment */
612 	test_skb_less_prog_attach(skel, tap_fd);
613 	/* Test indirect prog attachment via link */
614 	test_skb_less_link_create(skel, tap_fd);
615 
616 	close(tap_fd);
617 out_destroy_skel:
618 	bpf_flow__destroy(skel);
619 }
620