// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Davidlohr Bueso.
 */

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>

#include <signal.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"

#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/mman.h>

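/*
 * Per-worker bookkeeping: which futex the thread operates on and how
 * many lock/unlock pairs it completed during the run.
 */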
struct worker {
	int tid;
	u_int32_t *futex;
	pthread_t thread;
	unsigned long ops;
};

static u_int32_t global_futex = 0;
static struct worker *worker;
static bool done = false;
static int futex_flag = 0;
static pthread_mutex_t thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static pthread_cond_t thread_parent, thread_worker;

static struct bench_futex_parameters params = {
	.runtime  = 10,
};

static const struct option options[] = {
	OPT_UINTEGER('t', "threads", &params.nthreads, "Specify number of threads"),
	OPT_UINTEGER('r', "runtime", &params.runtime, "Specify runtime (in seconds)"),
	OPT_BOOLEAN( 'M', "multi",   &params.multi, "Use multiple futexes"),
	OPT_BOOLEAN( 's', "silent",  &params.silent, "Silent mode: do not display data/details"),
	OPT_BOOLEAN( 'S', "shared",  &params.fshared, "Use shared futexes instead of private ones"),
	OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
	OPT_END()
};

static const char * const bench_futex_lock_pi_usage[] = {
	"perf bench futex lock-pi <options>",
	NULL
};

static void print_summary(void)
{
	unsigned long avg = avg_stats(&throughput_stats);
	double stddev = stddev_stats(&throughput_stats);

	printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
	       !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
	       (int)bench__runtime.tv_sec);
}

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}

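/*
 * Worker thread: once all threads have started, repeatedly take and
 * release the PI futex until the parent flags the end of the run,
 * counting each completed lock/unlock pair.
 */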
static void *workerfn(void *arg)
{
	struct worker *w = (struct worker *) arg;
	unsigned long ops = w->ops;

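	/*
	 * Startup rendezvous: the last worker to arrive wakes the parent,
	 * then every worker waits for the parent's broadcast to begin.
	 */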
	pthread_mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	do {
		int ret;
	again:
		ret = futex_lock_pi(w->futex, NULL, futex_flag);

		if (ret) { /* handle lock acquisition */
			if (!params.silent)
				warn("thread %d: Could not lock pi-lock for %p (%d)",
				     w->tid, w->futex, ret);
			if (done)
				break;

			goto again;
		}

		usleep(1);
		ret = futex_unlock_pi(w->futex, futex_flag);
		if (ret && !params.silent)
			warn("thread %d: Could not unlock pi-lock for %p (%d)",
			     w->tid, w->futex, ret);
		ops++; /* account for thread's share of work */
	}  while (!done);

	w->ops = ops;
	return NULL;
}

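/*
 * Spawn the workers: with -M each thread gets its own futex, otherwise
 * they all contend on the shared global one; every thread is pinned to
 * a CPU in round-robin order.
 */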
static void create_threads(struct worker *w, pthread_attr_t thread_attr,
			   struct perf_cpu_map *cpu)
{
	cpu_set_t cpuset;
	unsigned int i;

	threads_starting = params.nthreads;

	for (i = 0; i < params.nthreads; i++) {
		worker[i].tid = i;

		if (params.multi) {
			worker[i].futex = calloc(1, sizeof(u_int32_t));
			if (!worker[i].futex)
				err(EXIT_FAILURE, "calloc");
		} else
			worker[i].futex = &global_futex;

		CPU_ZERO(&cpuset);
		CPU_SET(cpu->map[i % cpu->nr], &cpuset);

		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");

		if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
			err(EXIT_FAILURE, "pthread_create");
	}
}

int bench_futex_lock_pi(int argc, const char **argv)
{
	int ret = 0;
	unsigned int i;
	struct sigaction act;
	pthread_attr_t thread_attr;
	struct perf_cpu_map *cpu;

	argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
	if (argc)
		goto err;

	cpu = perf_cpu_map__new(NULL);
	if (!cpu)
		err(EXIT_FAILURE, "perf_cpu_map__new");

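	/* Let SIGINT (Ctrl-C) end the measurement early via toggle_done(). */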
	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	if (params.mlockall) {
		if (mlockall(MCL_CURRENT | MCL_FUTURE))
			err(EXIT_FAILURE, "mlockall");
	}

	if (!params.nthreads)
		params.nthreads = cpu->nr;

	worker = calloc(params.nthreads, sizeof(*worker));
	if (!worker)
		err(EXIT_FAILURE, "calloc");

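	/*
	 * Private futexes are local to this process, which lets the kernel
	 * skip the shared-memory key lookup; -S opts into shared futexes.
	 */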
	if (!params.fshared)
		futex_flag = FUTEX_PRIVATE_FLAG;

	printf("Run summary [PID %d]: %d threads doing pi lock/unlock pairing for %d secs.\n\n",
	       getpid(), params.nthreads, params.runtime);

	init_stats(&throughput_stats);
	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);

	threads_starting = params.nthreads;
	pthread_attr_init(&thread_attr);
	gettimeofday(&bench__start, NULL);

	create_threads(worker, thread_attr, cpu);
	pthread_attr_destroy(&thread_attr);

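	/* Wait until every worker has checked in, then release them all at once. */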
	pthread_mutex_lock(&thread_lock);
	while (threads_starting)
		pthread_cond_wait(&thread_parent, &thread_lock);
	pthread_cond_broadcast(&thread_worker);
	pthread_mutex_unlock(&thread_lock);

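	/* Let the workers run for the requested time, then flag completion. */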
	sleep(params.runtime);
	toggle_done(0, NULL, NULL);

	for (i = 0; i < params.nthreads; i++) {
		ret = pthread_join(worker[i].thread, NULL);
		if (ret)
			err(EXIT_FAILURE, "pthread_join");
	}

	/* cleanup & report results */
	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);

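	/* Fold each worker's ops/sec into the summary statistics. */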
	for (i = 0; i < params.nthreads; i++) {
		unsigned long t = bench__runtime.tv_sec > 0 ?
			worker[i].ops / bench__runtime.tv_sec : 0;

		update_stats(&throughput_stats, t);
		if (!params.silent)
			printf("[thread %3d] futex: %p [ %ld ops/sec ]\n",
			       worker[i].tid, worker[i].futex, t);

		if (params.multi)
			zfree(&worker[i].futex);
	}

	print_summary();

	free(worker);
	perf_cpu_map__put(cpu);
	return ret;
err:
	usage_with_options(bench_futex_lock_pi_usage, options);
	exit(EXIT_FAILURE);
}