Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--	tools/perf/util/evlist.c	73
1 file changed, 44 insertions(+), 29 deletions(-)
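The recurring substitution in this patch replaces raw evlist->cpus->nr and
evlist->threads->nr dereferences with the cpu_map__nr() and thread_map__nr()
accessors. A minimal sketch of the assumed contract of those helpers follows;
their real definitions live in util/cpumap.h and util/thread_map.h, so treat
this as an illustration rather than part of the patch:

static inline int cpu_map__nr(const struct cpu_map *cpus)
{
	/* assumed behaviour: a missing map counts as one (dummy) cpu */
	return cpus ? cpus->nr : 1;
}

static inline int thread_map__nr(struct thread_map *threads)
{
	/* assumed behaviour: a missing map counts as one (dummy) thread */
	return threads ? threads->nr : 1;
}

Tolerating a NULL map is what lets the reworked perf_evlist__new(void) defer
cpu/thread map setup, and it also makes the open-coded
"evlist->cpus ? evlist->cpus->nr : 1" fallback in the old out_err path of
perf_evlist__open() redundant, as the hunks below show.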
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c8be0fbc5145..f7c727801aab 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -7,7 +7,7 @@
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "util.h"
-#include "debugfs.h"
+#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
@@ -38,13 +38,12 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
evlist->workload.pid = -1;
}

-struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
- struct thread_map *threads)
+struct perf_evlist *perf_evlist__new(void)
{
struct perf_evlist *evlist = zalloc(sizeof(*evlist));

if (evlist != NULL)
- perf_evlist__init(evlist, cpus, threads);
+	perf_evlist__init(evlist, NULL, NULL);

return evlist;
}
@@ -228,12 +227,14 @@ void perf_evlist__disable(struct perf_evlist *evlist)
{
int cpu, thread;
struct perf_evsel *pos;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);

-	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
if (!perf_evsel__is_group_leader(pos))
continue;
- for (thread = 0; thread < evlist->threads->nr; thread++)
+ for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_DISABLE, 0);
}
@@ -244,12 +245,14 @@ void perf_evlist__enable(struct perf_evlist *evlist)
{
int cpu, thread;
struct perf_evsel *pos;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);

-	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
if (!perf_evsel__is_group_leader(pos))
continue;
- for (thread = 0; thread < evlist->threads->nr; thread++)
+ for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
PERF_EVENT_IOC_ENABLE, 0);
}
@@ -258,7 +261,9 @@ void perf_evlist__enable(struct perf_evlist *evlist)

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
- int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+ int nr_threads = thread_map__nr(evlist->threads);
+ int nfds = nr_cpus * nr_threads * evlist->nr_entries;
evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
@@ -417,7 +422,7 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
if (cpu_map__all(evlist->cpus))
- evlist->nr_mmaps = evlist->threads->nr;
+ evlist->nr_mmaps = thread_map__nr(evlist->threads);
evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
return evlist->mmap != NULL ? 0 : -ENOMEM;
}
@@ -442,11 +447,13 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
struct perf_evsel *evsel;
int cpu, thread;
+ int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);

-	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;

-	for (thread = 0; thread < evlist->threads->nr; thread++) {
+ for (thread = 0; thread < nr_threads; thread++) {
list_for_each_entry(evsel, &evlist->entries, node) {
int fd = FD(evsel, cpu, thread);

@@ -470,7 +477,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
return 0;

out_unmap:
- for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
if (evlist->mmap[cpu].base != NULL) {
munmap(evlist->mmap[cpu].base, evlist->mmap_len);
evlist->mmap[cpu].base = NULL;
@@ -483,8 +490,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
struct perf_evsel *evsel;
int thread;
+	int nr_threads = thread_map__nr(evlist->threads);

-	for (thread = 0; thread < evlist->threads->nr; thread++) {
+ for (thread = 0; thread < nr_threads; thread++) {
int output = -1;

list_for_each_entry(evsel, &evlist->entries, node) {
@@ -509,7 +517,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
return 0;

out_unmap:
- for (thread = 0; thread < evlist->threads->nr; thread++) {
+ for (thread = 0; thread < nr_threads; thread++) {
if (evlist->mmap[thread].base != NULL) {
munmap(evlist->mmap[thread].base, evlist->mmap_len);
evlist->mmap[thread].base = NULL;
@@ -610,7 +618,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist)
struct perf_evsel *evsel;
int err = 0;
const int ncpus = cpu_map__nr(evlist->cpus),
- nthreads = evlist->threads->nr;
+	nthreads = thread_map__nr(evlist->threads);

list_for_each_entry(evsel, &evlist->entries, node) {
if (evsel->filter == NULL)
@@ -629,7 +637,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
struct perf_evsel *evsel;
int err = 0;
const int ncpus = cpu_map__nr(evlist->cpus),
- nthreads = evlist->threads->nr;
+	nthreads = thread_map__nr(evlist->threads);

list_for_each_entry(evsel, &evlist->entries, node) {
err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
@@ -712,10 +720,20 @@ void perf_evlist__set_selected(struct perf_evlist *evlist,
evlist->selected = evsel;
}

+void perf_evlist__close(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int ncpus = cpu_map__nr(evlist->cpus);
+ int nthreads = thread_map__nr(evlist->threads);
+
+ list_for_each_entry_reverse(evsel, &evlist->entries, node)
+ perf_evsel__close(evsel, ncpus, nthreads);
+}
+
int perf_evlist__open(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
- int err, ncpus, nthreads;
+	int err;

list_for_each_entry(evsel, &evlist->entries, node) {
err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
@@ -725,19 +743,15 @@ int perf_evlist__open(struct perf_evlist *evlist)

return 0;
out_err:
- ncpus = evlist->cpus ? evlist->cpus->nr : 1;
- nthreads = evlist->threads ? evlist->threads->nr : 1;
-
- list_for_each_entry_reverse(evsel, &evlist->entries, node)
- perf_evsel__close(evsel, ncpus, nthreads);
-
+ perf_evlist__close(evlist);
errno = -err;
return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
- struct perf_record_opts *opts,
- const char *argv[])
+ struct perf_target *target,
+ const char *argv[], bool pipe_output,
+ bool want_signal)
{
int child_ready_pipe[2], go_pipe[2];
char bf;
@@ -759,7 +773,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
}

if (!evlist->workload.pid) {
- if (opts->pipe_output)
+ if (pipe_output)
dup2(2, 1);

close(child_ready_pipe[0]);
@@ -787,11 +801,12 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
execvp(argv[0], (char **)argv);

perror(argv[0]);
- kill(getppid(), SIGUSR1);
+ if (want_signal)
+ kill(getppid(), SIGUSR1);
exit(-1);
}

-	if (perf_target__none(&opts->target))
+ if (perf_target__none(target))
evlist->threads->map[0] = evlist->workload.pid;

close(child_ready_pipe[1]);
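
Taken together, the signature changes above alter the call sequence for any
code driving an evlist. A hypothetical caller-side sketch under the new API;
run_workload, the header paths, and the elided setup and cleanup steps are
illustrative assumptions, not part of this patch:

#include <errno.h>
#include <stdbool.h>
#include "util/evlist.h"	/* assumed in-tree header exposing the evlist API */
#include "util/target.h"	/* assumed in-tree header for struct perf_target */

static int run_workload(struct perf_target *target, const char *argv[])
{
	/* perf_evlist__new() no longer takes cpu/thread maps */
	struct perf_evlist *evlist = perf_evlist__new();
	int err;

	if (evlist == NULL)
		return -ENOMEM;

	/* ... cpu/thread map and evsel setup elided ... */

	/*
	 * prepare_workload() now takes the target plus explicit flags
	 * instead of a perf_record_opts: pipe_output=false leaves the
	 * child's stdout alone, want_signal=true makes a failed exec
	 * raise SIGUSR1 in the parent.
	 */
	err = perf_evlist__prepare_workload(evlist, target, argv, false, true);
	if (err < 0)
		goto out;

	/* on failure, open now tears down its fds via perf_evlist__close() */
	err = perf_evlist__open(evlist);
	if (err < 0)
		goto out;

	perf_evlist__enable(evlist);
	/* ... start the workload and consume events ... */
	perf_evlist__disable(evlist);
	perf_evlist__close(evlist);	/* the new symmetric teardown helper */
out:
	return err;
}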