Lines matching refs: evlist (tools/lib/perf/evlist.c)
27 void perf_evlist__init(struct perf_evlist *evlist) in perf_evlist__init() argument
29 INIT_LIST_HEAD(&evlist->entries); in perf_evlist__init()
30 evlist->nr_entries = 0; in perf_evlist__init()
31 fdarray__init(&evlist->pollfd, 64); in perf_evlist__init()
32 perf_evlist__reset_id_hash(evlist); in perf_evlist__init()
35 static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, in __perf_evlist__propagate_maps() argument
42 if (!evsel->own_cpus || evlist->has_user_cpus) { in __perf_evlist__propagate_maps()
44 evsel->cpus = perf_cpu_map__get(evlist->cpus); in __perf_evlist__propagate_maps()
45 } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) { in __perf_evlist__propagate_maps()
47 evsel->cpus = perf_cpu_map__get(evlist->cpus); in __perf_evlist__propagate_maps()
54 evsel->threads = perf_thread_map__get(evlist->threads); in __perf_evlist__propagate_maps()
55 evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus); in __perf_evlist__propagate_maps()
58 static void perf_evlist__propagate_maps(struct perf_evlist *evlist) in perf_evlist__propagate_maps() argument
62 perf_evlist__for_each_evsel(evlist, evsel) in perf_evlist__propagate_maps()
63 __perf_evlist__propagate_maps(evlist, evsel); in perf_evlist__propagate_maps()
66 void perf_evlist__add(struct perf_evlist *evlist, in perf_evlist__add() argument
69 evsel->idx = evlist->nr_entries; in perf_evlist__add()
70 list_add_tail(&evsel->node, &evlist->entries); in perf_evlist__add()
71 evlist->nr_entries += 1; in perf_evlist__add()
72 __perf_evlist__propagate_maps(evlist, evsel); in perf_evlist__add()
75 void perf_evlist__remove(struct perf_evlist *evlist, in perf_evlist__remove() argument
79 evlist->nr_entries -= 1; in perf_evlist__remove()
84 struct perf_evlist *evlist = zalloc(sizeof(*evlist)); in perf_evlist__new() local
86 if (evlist != NULL) in perf_evlist__new()
87 perf_evlist__init(evlist); in perf_evlist__new()
89 return evlist; in perf_evlist__new()
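perf_evlist__new(), perf_evlist__add() and perf_evlist__delete() (further down) form the basic lifecycle of a libperf event list. A minimal usage sketch, assuming the public libperf headers <perf/evlist.h> and <perf/evsel.h>, with a software clock event chosen purely for illustration:

#include <linux/perf_event.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <stdlib.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_evlist *evlist = perf_evlist__new();  /* zalloc() + perf_evlist__init() */
	struct perf_evsel *evsel;

	if (evlist == NULL)
		return EXIT_FAILURE;

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL) {
		perf_evlist__delete(evlist);
		return EXIT_FAILURE;
	}

	/* Appends to evlist->entries, bumps nr_entries, propagates maps. */
	perf_evlist__add(evlist, evsel);

	/* delete() purges the entries, so the evsel is freed here as well. */
	perf_evlist__delete(evlist);
	return EXIT_SUCCESS;
}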
93 perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev) in perf_evlist__next() argument
98 next = list_first_entry(&evlist->entries, in perf_evlist__next()
106 if (&next->node == &evlist->entries) in perf_evlist__next()
112 static void perf_evlist__purge(struct perf_evlist *evlist) in perf_evlist__purge() argument
116 perf_evlist__for_each_entry_safe(evlist, n, pos) { in perf_evlist__purge()
121 evlist->nr_entries = 0; in perf_evlist__purge()
124 void perf_evlist__exit(struct perf_evlist *evlist) in perf_evlist__exit() argument
126 perf_cpu_map__put(evlist->cpus); in perf_evlist__exit()
127 perf_cpu_map__put(evlist->all_cpus); in perf_evlist__exit()
128 perf_thread_map__put(evlist->threads); in perf_evlist__exit()
129 evlist->cpus = NULL; in perf_evlist__exit()
130 evlist->all_cpus = NULL; in perf_evlist__exit()
131 evlist->threads = NULL; in perf_evlist__exit()
132 fdarray__exit(&evlist->pollfd); in perf_evlist__exit()
135 void perf_evlist__delete(struct perf_evlist *evlist) in perf_evlist__delete() argument
137 if (evlist == NULL) in perf_evlist__delete()
140 perf_evlist__munmap(evlist); in perf_evlist__delete()
141 perf_evlist__close(evlist); in perf_evlist__delete()
142 perf_evlist__purge(evlist); in perf_evlist__delete()
143 perf_evlist__exit(evlist); in perf_evlist__delete()
144 free(evlist); in perf_evlist__delete()
147 void perf_evlist__set_maps(struct perf_evlist *evlist, in perf_evlist__set_maps() argument
158 if (cpus != evlist->cpus) { in perf_evlist__set_maps()
159 perf_cpu_map__put(evlist->cpus); in perf_evlist__set_maps()
160 evlist->cpus = perf_cpu_map__get(cpus); in perf_evlist__set_maps()
163 if (threads != evlist->threads) { in perf_evlist__set_maps()
164 perf_thread_map__put(evlist->threads); in perf_evlist__set_maps()
165 evlist->threads = perf_thread_map__get(threads); in perf_evlist__set_maps()
168 if (!evlist->all_cpus && cpus) in perf_evlist__set_maps()
169 evlist->all_cpus = perf_cpu_map__get(cpus); in perf_evlist__set_maps()
171 perf_evlist__propagate_maps(evlist); in perf_evlist__set_maps()
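perf_evlist__set_maps() is how callers hand CPU and thread maps to the list before opening it; the propagation code above then copies them onto every evsel. A sketch targeting the current process on all online CPUs, assuming the libperf <perf/cpumap.h> and <perf/threadmap.h> API contemporary with this listing (the helper name is ours):

#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <perf/evlist.h>
#include <unistd.h>

static int set_current_process_maps(struct perf_evlist *evlist)
{
	/* NULL cpu list means "all online CPUs" here. */
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
	/* One-slot thread map, pointed at this process. */
	struct perf_thread_map *threads = perf_thread_map__new_dummy();
	int err = -1;

	if (cpus == NULL || threads == NULL)
		goto out;

	perf_thread_map__set_pid(threads, 0, getpid());

	/* Takes its own references and propagates the maps to each evsel. */
	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0;
out:
	/* Drop the local references; the evlist keeps its own. */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return err;
}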
174 int perf_evlist__open(struct perf_evlist *evlist) in perf_evlist__open() argument
179 perf_evlist__for_each_entry(evlist, evsel) { in perf_evlist__open()
188 perf_evlist__close(evlist); in perf_evlist__open()
192 void perf_evlist__close(struct perf_evlist *evlist) in perf_evlist__close() argument
196 perf_evlist__for_each_entry_reverse(evlist, evsel) in perf_evlist__close()
200 void perf_evlist__enable(struct perf_evlist *evlist) in perf_evlist__enable() argument
204 perf_evlist__for_each_entry(evlist, evsel) in perf_evlist__enable()
208 void perf_evlist__disable(struct perf_evlist *evlist) in perf_evlist__disable() argument
212 perf_evlist__for_each_entry(evlist, evsel) in perf_evlist__disable()
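perf_evlist__open(), __enable(), __disable() and __close() walk the entries and apply the per-evsel operation, with open() rolling everything back through close() on failure. The usual call order, sketched under the same assumptions as above (helper name ours):

static int count_for_a_while(struct perf_evlist *evlist)
{
	int err = perf_evlist__open(evlist);	/* opens every evsel, closes all on error */

	if (err < 0)
		return err;

	perf_evlist__enable(evlist);	/* PERF_EVENT_IOC_ENABLE on each evsel */
	/* ... run the workload being measured ... */
	perf_evlist__disable(evlist);	/* PERF_EVENT_IOC_DISABLE on each evsel */

	perf_evlist__close(evlist);
	return 0;
}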
216 u64 perf_evlist__read_format(struct perf_evlist *evlist) in perf_evlist__read_format() argument
218 struct perf_evsel *first = perf_evlist__first(evlist); in perf_evlist__read_format()
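perf_evlist__read_format() simply reports the first evsel's read_format, which is why the leader's format governs group reads (see line 276 below). For plain per-evsel counter reads, a sketch using the public perf_evsel__read() (the helper name is ours):

#include <stdio.h>
#include <perf/evlist.h>
#include <perf/evsel.h>

static void print_counts(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct perf_counts_values counts;

	perf_evlist__for_each_evsel(evlist, evsel) {
		/* First (cpu, thread) slot of each evsel's maps. */
		if (perf_evsel__read(evsel, 0, 0, &counts) == 0)
			printf("value %llu, enabled %llu, running %llu\n",
			       (unsigned long long)counts.val,
			       (unsigned long long)counts.ena,
			       (unsigned long long)counts.run);
	}
}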
225 static void perf_evlist__id_hash(struct perf_evlist *evlist, in perf_evlist__id_hash() argument
235 hlist_add_head(&sid->node, &evlist->heads[hash]); in perf_evlist__id_hash()
238 void perf_evlist__reset_id_hash(struct perf_evlist *evlist) in perf_evlist__reset_id_hash() argument
243 INIT_HLIST_HEAD(&evlist->heads[i]); in perf_evlist__reset_id_hash()
246 void perf_evlist__id_add(struct perf_evlist *evlist, in perf_evlist__id_add() argument
250 perf_evlist__id_hash(evlist, evsel, cpu, thread, id); in perf_evlist__id_add()
254 int perf_evlist__id_add_fd(struct perf_evlist *evlist, in perf_evlist__id_add_fd() argument
276 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) in perf_evlist__id_add_fd()
291 perf_evlist__id_add(evlist, evsel, cpu, thread, id); in perf_evlist__id_add_fd()
295 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) in perf_evlist__alloc_pollfd() argument
297 int nr_cpus = perf_cpu_map__nr(evlist->cpus); in perf_evlist__alloc_pollfd()
298 int nr_threads = perf_thread_map__nr(evlist->threads); in perf_evlist__alloc_pollfd()
302 perf_evlist__for_each_entry(evlist, evsel) { in perf_evlist__alloc_pollfd()
309 if (fdarray__available_entries(&evlist->pollfd) < nfds && in perf_evlist__alloc_pollfd()
310 fdarray__grow(&evlist->pollfd, nfds) < 0) in perf_evlist__alloc_pollfd()
316 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, in perf_evlist__add_pollfd() argument
319 int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags); in perf_evlist__add_pollfd()
322 evlist->pollfd.priv[pos].ptr = ptr; in perf_evlist__add_pollfd()
338 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) in perf_evlist__filter_pollfd() argument
340 return fdarray__filter(&evlist->pollfd, revents_and_mask, in perf_evlist__filter_pollfd()
344 int perf_evlist__poll(struct perf_evlist *evlist, int timeout) in perf_evlist__poll() argument
346 return fdarray__poll(&evlist->pollfd, timeout); in perf_evlist__poll()
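perf_evlist__poll() is a thin wrapper over fdarray__poll() on the pollfd array that perf_evlist__alloc_pollfd()/add_pollfd() fill in while mmapping. A sketch of waiting for ring-buffer data with a timeout (helper name ours):

/* Wait up to timeout_ms for any mmapped event fd to become readable. */
static int wait_for_events(struct perf_evlist *evlist, int timeout_ms)
{
	int n = perf_evlist__poll(evlist, timeout_ms);

	if (n < 0)
		return n;	/* poll() itself failed */
	if (n == 0)
		return 0;	/* timed out, nothing to read */

	/* n descriptors have revents set; the caller drains the mmaps next. */
	return n;
}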
349 static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite) in perf_evlist__alloc_mmap() argument
354 map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); in perf_evlist__alloc_mmap()
358 for (i = 0; i < evlist->nr_mmaps; i++) { in perf_evlist__alloc_mmap()
386 perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx) in perf_evlist__mmap_cb_get() argument
390 maps = overwrite ? evlist->mmap_ovw : evlist->mmap; in perf_evlist__mmap_cb_get()
393 maps = perf_evlist__alloc_mmap(evlist, overwrite); in perf_evlist__mmap_cb_get()
398 evlist->mmap_ovw = maps; in perf_evlist__mmap_cb_get()
400 evlist->mmap = maps; in perf_evlist__mmap_cb_get()
415 static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map, in perf_evlist__set_mmap_first() argument
419 evlist->mmap_ovw_first = map; in perf_evlist__set_mmap_first()
421 evlist->mmap_first = map; in perf_evlist__set_mmap_first()
425 mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, in mmap_per_evsel() argument
429 int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx); in mmap_per_evsel()
433 perf_evlist__for_each_entry(evlist, evsel) { in mmap_per_evsel()
445 map = ops->get(evlist, overwrite, idx); in mmap_per_evsel()
481 perf_evlist__set_mmap_first(evlist, map, overwrite); in mmap_per_evsel()
492 perf_evlist__add_pollfd(evlist, fd, map, revent, fdarray_flag__default) < 0) { in mmap_per_evsel()
498 if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, in mmap_per_evsel()
509 mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, in mmap_per_thread() argument
513 int nr_threads = perf_thread_map__nr(evlist->threads); in mmap_per_thread()
520 ops->idx(evlist, mp, thread, false); in mmap_per_thread()
522 if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread, in mmap_per_thread()
530 perf_evlist__munmap(evlist); in mmap_per_thread()
535 mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops, in mmap_per_cpu() argument
538 int nr_threads = perf_thread_map__nr(evlist->threads); in mmap_per_cpu()
539 int nr_cpus = perf_cpu_map__nr(evlist->cpus); in mmap_per_cpu()
547 ops->idx(evlist, mp, cpu, true); in mmap_per_cpu()
550 if (mmap_per_evsel(evlist, ops, cpu, mp, cpu, in mmap_per_cpu()
559 perf_evlist__munmap(evlist); in mmap_per_cpu()
563 static int perf_evlist__nr_mmaps(struct perf_evlist *evlist) in perf_evlist__nr_mmaps() argument
567 nr_mmaps = perf_cpu_map__nr(evlist->cpus); in perf_evlist__nr_mmaps()
568 if (perf_cpu_map__empty(evlist->cpus)) in perf_evlist__nr_mmaps()
569 nr_mmaps = perf_thread_map__nr(evlist->threads); in perf_evlist__nr_mmaps()
574 int perf_evlist__mmap_ops(struct perf_evlist *evlist, in perf_evlist__mmap_ops() argument
579 const struct perf_cpu_map *cpus = evlist->cpus; in perf_evlist__mmap_ops()
580 const struct perf_thread_map *threads = evlist->threads; in perf_evlist__mmap_ops()
585 mp->mask = evlist->mmap_len - page_size - 1; in perf_evlist__mmap_ops()
587 evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist); in perf_evlist__mmap_ops()
589 perf_evlist__for_each_entry(evlist, evsel) { in perf_evlist__mmap_ops()
596 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) in perf_evlist__mmap_ops()
600 return mmap_per_thread(evlist, ops, mp); in perf_evlist__mmap_ops()
602 return mmap_per_cpu(evlist, ops, mp); in perf_evlist__mmap_ops()
605 int perf_evlist__mmap(struct perf_evlist *evlist, int pages) in perf_evlist__mmap() argument
613 evlist->mmap_len = (pages + 1) * page_size; in perf_evlist__mmap()
615 return perf_evlist__mmap_ops(evlist, &ops, &mp); in perf_evlist__mmap()
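perf_evlist__mmap() takes the ring-buffer size in data pages; as line 613 above shows, the resulting mapping is (pages + 1) * page_size because one extra page holds the perf_event_mmap_page control header. A sketch with an arbitrary power-of-two page count (as the kernel requires), helper name ours:

#define RING_PAGES 64	/* 64 data pages + 1 header page per ring */

static int map_ring_buffers(struct perf_evlist *evlist)
{
	int err = perf_evlist__mmap(evlist, RING_PAGES);

	if (err < 0)
		return err;

	/* ... poll and drain the rings, then tear them down ... */
	perf_evlist__munmap(evlist);
	return 0;
}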
618 void perf_evlist__munmap(struct perf_evlist *evlist) in perf_evlist__munmap() argument
622 if (evlist->mmap) { in perf_evlist__munmap()
623 for (i = 0; i < evlist->nr_mmaps; i++) in perf_evlist__munmap()
624 perf_mmap__munmap(&evlist->mmap[i]); in perf_evlist__munmap()
627 if (evlist->mmap_ovw) { in perf_evlist__munmap()
628 for (i = 0; i < evlist->nr_mmaps; i++) in perf_evlist__munmap()
629 perf_mmap__munmap(&evlist->mmap_ovw[i]); in perf_evlist__munmap()
632 zfree(&evlist->mmap); in perf_evlist__munmap()
633 zfree(&evlist->mmap_ovw); in perf_evlist__munmap()
637 perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map, in perf_evlist__next_mmap() argument
643 return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first; in perf_evlist__next_mmap()
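perf_evlist__next_mmap() is what the perf_evlist__for_each_mmap() iterator is built on, starting from mmap_first (or mmap_ovw_first for the overwrite maps). Draining the regular rings, sketched with libperf's <perf/mmap.h> and <perf/event.h>:

#include <stdio.h>
#include <perf/evlist.h>
#include <perf/mmap.h>
#include <perf/event.h>

static void drain_ring_buffers(struct perf_evlist *evlist)
{
	struct perf_mmap *map;
	union perf_event *event;

	/* false: walk the regular (non-overwrite) maps. */
	perf_evlist__for_each_mmap(evlist, map, false) {
		if (perf_mmap__read_init(map) < 0)
			continue;	/* this ring is empty */

		while ((event = perf_mmap__read_event(map)) != NULL) {
			printf("event type %u\n", event->header.type);
			perf_mmap__consume(map);
		}

		perf_mmap__read_done(map);
	}
}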
659 void perf_evlist__set_leader(struct perf_evlist *evlist) in perf_evlist__set_leader() argument
661 if (evlist->nr_entries) { in perf_evlist__set_leader()
662 evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0; in perf_evlist__set_leader()
663 __perf_evlist__set_leader(&evlist->entries); in perf_evlist__set_leader()
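perf_evlist__set_leader() turns the whole list into one group (nr_groups becomes 1 once there is more than one entry) with the first evsel as leader. A sketch of building a two-counter group; the event choice and helper name are ours, and PERF_FORMAT_GROUP on the leader ties in with the read_format handling above:

static int add_grouped_counters(struct perf_evlist *evlist)
{
	struct perf_event_attr cycles = {
		.type        = PERF_TYPE_HARDWARE,
		.config      = PERF_COUNT_HW_CPU_CYCLES,
		.read_format = PERF_FORMAT_GROUP,
	};
	struct perf_event_attr insns = {
		.type   = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_INSTRUCTIONS,
	};
	struct perf_evsel *evsel;

	evsel = perf_evsel__new(&cycles);
	if (evsel == NULL)
		return -1;
	perf_evlist__add(evlist, evsel);

	evsel = perf_evsel__new(&insns);
	if (evsel == NULL)
		return -1;
	perf_evlist__add(evlist, evsel);

	/* The first entry (cycles) becomes the leader of all the others. */
	perf_evlist__set_leader(evlist);
	return 0;
}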