1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2021 Facebook */
3
4 #include <argp.h>
5 #include <linux/log2.h>
6 #include <pthread.h>
7 #include "bench.h"
8 #include "bloom_filter_bench.skel.h"
9 #include "bpf_util.h"
10
/* Benchmark-wide state: configured once during setup, then read by the
 * producer and measurement threads.
 */
static struct ctx {
	bool use_array_map;	/* populate the array map during setup */
	bool use_hashmap;	/* populate the hashmap during setup */
	bool hashmap_use_bloom;	/* mirrored into the skeleton's rodata */
	bool count_false_hits;	/* mirrored into the skeleton's rodata */

	struct bloom_filter_bench *skel;	/* opened+loaded BPF skeleton */

	/* fds of the skeleton's maps, cached by populate_maps() */
	int bloom_fd;
	int hashmap_fd;
	int array_map_fd;

	/* map_done_cv signals (under map_done_mtx) that a preparer thread
	 * finished populating; map_prepare_err records any failure.
	 */
	pthread_mutex_t map_done_mtx;
	pthread_cond_t map_done_cv;
	bool map_done;
	bool map_prepare_err;

	/* next 1-based map index to claim, shared by the preparer threads */
	__u32 next_map_idx;
} ctx = {
	.map_done_mtx = PTHREAD_MUTEX_INITIALIZER,
	.map_done_cv = PTHREAD_COND_INITIALIZER,
};
33
/* Per-cpu counter layout read out of the skeleton's percpu_stats in
 * measure(); indexed by hit_key/drop_key/false_hit_key from rodata.
 * NOTE(review): this tag shadows <sys/stat.h>'s struct stat within this
 * translation unit — confirm no stat(2) usage is ever added here.
 */
struct stat {
	__u32 stats[3];
};
37
38 static struct {
39 __u32 nr_entries;
40 __u8 nr_hash_funcs;
41 __u8 value_size;
42 } args = {
43 .nr_entries = 1000,
44 .nr_hash_funcs = 3,
45 .value_size = 8,
46 };
47
/* argp option keys; values chosen above the printable-character range so
 * they don't collide with short-option letters.
 */
enum {
	ARG_NR_ENTRIES = 3000,
	ARG_NR_HASH_FUNCS = 3001,
	ARG_VALUE_SIZE = 3002,
};
53
/* Long options understood by this benchmark; dispatched in parse_arg(). */
static const struct argp_option opts[] = {
	{ "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
		"Set number of expected unique entries in the bloom filter"},
	{ "nr_hash_funcs", ARG_NR_HASH_FUNCS, "NR_HASH_FUNCS", 0,
		"Set number of hash functions in the bloom filter"},
	{ "value_size", ARG_VALUE_SIZE, "VALUE_SIZE", 0,
		"Set value size (in bytes) of bloom filter entries"},
	{},
};
63
parse_arg(int key,char * arg,struct argp_state * state)64 static error_t parse_arg(int key, char *arg, struct argp_state *state)
65 {
66 switch (key) {
67 case ARG_NR_ENTRIES:
68 args.nr_entries = strtol(arg, NULL, 10);
69 if (args.nr_entries == 0) {
70 fprintf(stderr, "Invalid nr_entries count.");
71 argp_usage(state);
72 }
73 break;
74 case ARG_NR_HASH_FUNCS:
75 args.nr_hash_funcs = strtol(arg, NULL, 10);
76 if (args.nr_hash_funcs == 0 || args.nr_hash_funcs > 15) {
77 fprintf(stderr,
78 "The bloom filter must use 1 to 15 hash functions.");
79 argp_usage(state);
80 }
81 break;
82 case ARG_VALUE_SIZE:
83 args.value_size = strtol(arg, NULL, 10);
84 if (args.value_size < 2 || args.value_size > 256) {
85 fprintf(stderr,
86 "Invalid value size. Must be between 2 and 256 bytes");
87 argp_usage(state);
88 }
89 break;
90 default:
91 return ARGP_ERR_UNKNOWN;
92 }
93
94 return 0;
95 }
96
/* Exported into the benchmark runner so bench.c can merge these options
 * into its top-level argument parser.
 */
const struct argp bench_bloom_map_argp = {
	.options = opts,
	.parser = parse_arg,
};
102
validate(void)103 static void validate(void)
104 {
105 if (env.consumer_cnt != 1) {
106 fprintf(stderr,
107 "The bloom filter benchmarks do not support multi-consumer use\n");
108 exit(1);
109 }
110 }
111
/* Issue a cheap syscall to drive the attached BPF program.
 * NOTE(review): presumably the benchmark programs hook getpgid entry —
 * confirm against bloom_filter_bench.bpf.c.
 */
static inline void trigger_bpf_program(void)
{
	(void)syscall(__NR_getpgid);
}
116
producer(void * input)117 static void *producer(void *input)
118 {
119 while (true)
120 trigger_bpf_program();
121
122 return NULL;
123 }
124
map_prepare_thread(void * arg)125 static void *map_prepare_thread(void *arg)
126 {
127 __u32 val_size, i;
128 void *val = NULL;
129 int err;
130
131 val_size = args.value_size;
132 val = malloc(val_size);
133 if (!val) {
134 ctx.map_prepare_err = true;
135 goto done;
136 }
137
138 while (true) {
139 i = __atomic_add_fetch(&ctx.next_map_idx, 1, __ATOMIC_RELAXED);
140 if (i > args.nr_entries)
141 break;
142
143 again:
144 /* Populate hashmap, bloom filter map, and array map with the same
145 * random values
146 */
147 err = syscall(__NR_getrandom, val, val_size, 0);
148 if (err != val_size) {
149 ctx.map_prepare_err = true;
150 fprintf(stderr, "failed to get random value: %d\n", -errno);
151 break;
152 }
153
154 if (ctx.use_hashmap) {
155 err = bpf_map_update_elem(ctx.hashmap_fd, val, val, BPF_NOEXIST);
156 if (err) {
157 if (err != -EEXIST) {
158 ctx.map_prepare_err = true;
159 fprintf(stderr, "failed to add elem to hashmap: %d\n",
160 -errno);
161 break;
162 }
163 goto again;
164 }
165 }
166
167 i--;
168
169 if (ctx.use_array_map) {
170 err = bpf_map_update_elem(ctx.array_map_fd, &i, val, 0);
171 if (err) {
172 ctx.map_prepare_err = true;
173 fprintf(stderr, "failed to add elem to array map: %d\n", -errno);
174 break;
175 }
176 }
177
178 if (ctx.use_hashmap && !ctx.hashmap_use_bloom)
179 continue;
180
181 err = bpf_map_update_elem(ctx.bloom_fd, NULL, val, 0);
182 if (err) {
183 ctx.map_prepare_err = true;
184 fprintf(stderr,
185 "failed to add elem to bloom filter map: %d\n", -errno);
186 break;
187 }
188 }
189 done:
190 pthread_mutex_lock(&ctx.map_done_mtx);
191 ctx.map_done = true;
192 pthread_cond_signal(&ctx.map_done_cv);
193 pthread_mutex_unlock(&ctx.map_done_mtx);
194
195 if (val)
196 free(val);
197
198 return NULL;
199 }
200
populate_maps(void)201 static void populate_maps(void)
202 {
203 unsigned int nr_cpus = bpf_num_possible_cpus();
204 pthread_t map_thread;
205 int i, err, nr_rand_bytes;
206
207 ctx.bloom_fd = bpf_map__fd(ctx.skel->maps.bloom_map);
208 ctx.hashmap_fd = bpf_map__fd(ctx.skel->maps.hashmap);
209 ctx.array_map_fd = bpf_map__fd(ctx.skel->maps.array_map);
210
211 for (i = 0; i < nr_cpus; i++) {
212 err = pthread_create(&map_thread, NULL, map_prepare_thread,
213 NULL);
214 if (err) {
215 fprintf(stderr, "failed to create pthread: %d\n", -errno);
216 exit(1);
217 }
218 }
219
220 pthread_mutex_lock(&ctx.map_done_mtx);
221 while (!ctx.map_done)
222 pthread_cond_wait(&ctx.map_done_cv, &ctx.map_done_mtx);
223 pthread_mutex_unlock(&ctx.map_done_mtx);
224
225 if (ctx.map_prepare_err)
226 exit(1);
227
228 nr_rand_bytes = syscall(__NR_getrandom, ctx.skel->bss->rand_vals,
229 ctx.skel->rodata->nr_rand_bytes, 0);
230 if (nr_rand_bytes != ctx.skel->rodata->nr_rand_bytes) {
231 fprintf(stderr, "failed to get random bytes\n");
232 exit(1);
233 }
234 }
235
check_args(void)236 static void check_args(void)
237 {
238 if (args.value_size < 8) {
239 __u64 nr_unique_entries = 1ULL << (args.value_size * 8);
240
241 if (args.nr_entries > nr_unique_entries) {
242 fprintf(stderr,
243 "Not enough unique values for the nr_entries requested\n");
244 exit(1);
245 }
246 }
247 }
248
setup_skeleton(void)249 static struct bloom_filter_bench *setup_skeleton(void)
250 {
251 struct bloom_filter_bench *skel;
252
253 check_args();
254
255 setup_libbpf();
256
257 skel = bloom_filter_bench__open();
258 if (!skel) {
259 fprintf(stderr, "failed to open skeleton\n");
260 exit(1);
261 }
262
263 skel->rodata->hashmap_use_bloom = ctx.hashmap_use_bloom;
264 skel->rodata->count_false_hits = ctx.count_false_hits;
265
266 /* Resize number of entries */
267 bpf_map__set_max_entries(skel->maps.hashmap, args.nr_entries);
268
269 bpf_map__set_max_entries(skel->maps.array_map, args.nr_entries);
270
271 bpf_map__set_max_entries(skel->maps.bloom_map, args.nr_entries);
272
273 /* Set value size */
274 bpf_map__set_value_size(skel->maps.array_map, args.value_size);
275
276 bpf_map__set_value_size(skel->maps.bloom_map, args.value_size);
277
278 bpf_map__set_value_size(skel->maps.hashmap, args.value_size);
279
280 /* For the hashmap, we use the value as the key as well */
281 bpf_map__set_key_size(skel->maps.hashmap, args.value_size);
282
283 skel->bss->value_size = args.value_size;
284
285 /* Set number of hash functions */
286 bpf_map__set_map_extra(skel->maps.bloom_map, args.nr_hash_funcs);
287
288 if (bloom_filter_bench__load(skel)) {
289 fprintf(stderr, "failed to load skeleton\n");
290 exit(1);
291 }
292
293 return skel;
294 }
295
bloom_lookup_setup(void)296 static void bloom_lookup_setup(void)
297 {
298 struct bpf_link *link;
299
300 ctx.use_array_map = true;
301
302 ctx.skel = setup_skeleton();
303
304 populate_maps();
305
306 link = bpf_program__attach(ctx.skel->progs.bloom_lookup);
307 if (!link) {
308 fprintf(stderr, "failed to attach program!\n");
309 exit(1);
310 }
311 }
312
bloom_update_setup(void)313 static void bloom_update_setup(void)
314 {
315 struct bpf_link *link;
316
317 ctx.use_array_map = true;
318
319 ctx.skel = setup_skeleton();
320
321 populate_maps();
322
323 link = bpf_program__attach(ctx.skel->progs.bloom_update);
324 if (!link) {
325 fprintf(stderr, "failed to attach program!\n");
326 exit(1);
327 }
328 }
329
false_positive_setup(void)330 static void false_positive_setup(void)
331 {
332 struct bpf_link *link;
333
334 ctx.use_hashmap = true;
335 ctx.hashmap_use_bloom = true;
336 ctx.count_false_hits = true;
337
338 ctx.skel = setup_skeleton();
339
340 populate_maps();
341
342 link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
343 if (!link) {
344 fprintf(stderr, "failed to attach program!\n");
345 exit(1);
346 }
347 }
348
hashmap_with_bloom_setup(void)349 static void hashmap_with_bloom_setup(void)
350 {
351 struct bpf_link *link;
352
353 ctx.use_hashmap = true;
354 ctx.hashmap_use_bloom = true;
355
356 ctx.skel = setup_skeleton();
357
358 populate_maps();
359
360 link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
361 if (!link) {
362 fprintf(stderr, "failed to attach program!\n");
363 exit(1);
364 }
365 }
366
hashmap_no_bloom_setup(void)367 static void hashmap_no_bloom_setup(void)
368 {
369 struct bpf_link *link;
370
371 ctx.use_hashmap = true;
372
373 ctx.skel = setup_skeleton();
374
375 populate_maps();
376
377 link = bpf_program__attach(ctx.skel->progs.bloom_hashmap_lookup);
378 if (!link) {
379 fprintf(stderr, "failed to attach program!\n");
380 exit(1);
381 }
382 }
383
measure(struct bench_res * res)384 static void measure(struct bench_res *res)
385 {
386 unsigned long total_hits = 0, total_drops = 0, total_false_hits = 0;
387 static unsigned long last_hits, last_drops, last_false_hits;
388 unsigned int nr_cpus = bpf_num_possible_cpus();
389 int hit_key, drop_key, false_hit_key;
390 int i;
391
392 hit_key = ctx.skel->rodata->hit_key;
393 drop_key = ctx.skel->rodata->drop_key;
394 false_hit_key = ctx.skel->rodata->false_hit_key;
395
396 if (ctx.skel->bss->error != 0) {
397 fprintf(stderr, "error (%d) when searching the bloom filter\n",
398 ctx.skel->bss->error);
399 exit(1);
400 }
401
402 for (i = 0; i < nr_cpus; i++) {
403 struct stat *s = (void *)&ctx.skel->bss->percpu_stats[i];
404
405 total_hits += s->stats[hit_key];
406 total_drops += s->stats[drop_key];
407 total_false_hits += s->stats[false_hit_key];
408 }
409
410 res->hits = total_hits - last_hits;
411 res->drops = total_drops - last_drops;
412 res->false_hits = total_false_hits - last_false_hits;
413
414 last_hits = total_hits;
415 last_drops = total_drops;
416 last_false_hits = total_false_hits;
417 }
418
consumer(void * input)419 static void *consumer(void *input)
420 {
421 return NULL;
422 }
423
/* Benchmark descriptors exported to the runner (bench.c). */

/* Lookups directly against the bloom filter map */
const struct bench bench_bloom_lookup = {
	.name = "bloom-lookup",
	.validate = validate,
	.setup = bloom_lookup_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = hits_drops_report_progress,
	.report_final = hits_drops_report_final,
};

/* Updates directly against the bloom filter map */
const struct bench bench_bloom_update = {
	.name = "bloom-update",
	.validate = validate,
	.setup = bloom_update_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = hits_drops_report_progress,
	.report_final = hits_drops_report_final,
};

/* Bloom-guarded hashmap lookups with false-positive accounting */
const struct bench bench_bloom_false_positive = {
	.name = "bloom-false-positive",
	.validate = validate,
	.setup = false_positive_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = false_hits_report_progress,
	.report_final = false_hits_report_final,
};

/* Plain hashmap lookups — baseline for the bloom-guarded variant */
const struct bench bench_hashmap_without_bloom = {
	.name = "hashmap-without-bloom",
	.validate = validate,
	.setup = hashmap_no_bloom_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = hits_drops_report_progress,
	.report_final = hits_drops_report_final,
};

/* Hashmap lookups guarded by the bloom filter */
const struct bench bench_hashmap_with_bloom = {
	.name = "hashmap-with-bloom",
	.validate = validate,
	.setup = hashmap_with_bloom_setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,
	.report_progress = hits_drops_report_progress,
	.report_final = hits_drops_report_final,
};
478