Diffstat (limited to 'tools/perf/builtin-trace.c')
-rw-r--r--  tools/perf/builtin-trace.c  157
1 file changed, 108 insertions(+), 49 deletions(-)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index ebde59e61133..ed4583128b9c 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -60,6 +60,7 @@
#include <linux/stringify.h>
#include <linux/time64.h>
#include <fcntl.h>
+#include <sys/sysmacros.h>
#include "sane_ctype.h"
@@ -112,8 +113,9 @@ struct trace {
} stats;
unsigned int max_stack;
unsigned int min_stack;
- bool sort_events;
+ int raw_augmented_syscalls_args_size;
bool raw_augmented_syscalls;
+ bool sort_events;
bool not_ev_qualifier;
bool live;
bool full_time;
@@ -283,12 +285,17 @@ out_delete:
return -ENOENT;
}
-static int perf_evsel__init_augmented_syscall_tp(struct perf_evsel *evsel)
+static int perf_evsel__init_augmented_syscall_tp(struct perf_evsel *evsel, struct perf_evsel *tp)
{
struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
- if (evsel->priv != NULL) { /* field, sizeof_field, offsetof_field */
- if (__tp_field__init_uint(&sc->id, sizeof(long), sizeof(long long), evsel->needs_swap))
+ if (evsel->priv != NULL) {
+ struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
+ if (syscall_id == NULL)
+ syscall_id = perf_evsel__field(tp, "__syscall_nr");
+ if (syscall_id == NULL)
+ goto out_delete;
+ if (__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
goto out_delete;
return 0;
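
perf_evsel__init_augmented_syscall_tp() now takes a second, template tracepoint evsel and initializes the syscall id field from that tracepoint's format: "id" on raw_syscalls:sys_enter, falling back to "__syscall_nr" as used by the syscalls:sys_enter_* tracepoints, instead of hardcoding sizeof(long)/sizeof(long long). A hedged, self-contained sketch of this kind of size/offset driven field access; the struct layout and names below are illustrative, not the perf internals:

#include <stdint.h>
#include <string.h>

struct tp_field_sketch {
	int offset;	/* byte offset of the field in the raw payload */
	int size;	/* field size taken from the tracepoint format (4 or 8) */
};

/* read an unsigned integer field out of a raw tracepoint payload */
static uint64_t tp_field_read_uint(const struct tp_field_sketch *f, const void *raw)
{
	const char *p = (const char *)raw + f->offset;

	if (f->size == 4) {
		uint32_t v;

		memcpy(&v, p, sizeof(v));
		return v;
	} else {
		uint64_t v;

		memcpy(&v, p, sizeof(v));
		return v;
	}
}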
@@ -974,9 +981,9 @@ struct thread_trace {
char *name;
} filename;
struct {
- int max;
- char **table;
- } paths;
+ int max;
+ struct file *table;
+ } files;
struct intlist *syscall_stats;
};
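
The per-fd cache stops being a bare array of pathname strings and becomes an array of struct file entries, so more than just the path can be remembered per fd. The struct file definition is added elsewhere in the patch; judging from the uses below it presumably looks roughly like this (illustrative, not copied from the diff):

struct file {
	char	*pathname;	/* strdup()ed path for this fd */
	int	 dev_maj;	/* major(st.st_rdev) if the path could be stat()ed */
};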
@@ -986,7 +993,7 @@ static struct thread_trace *thread_trace__new(void)
struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
if (ttrace)
- ttrace->paths.max = -1;
+ ttrace->files.max = -1;
ttrace->syscall_stats = intlist__new(NULL);
@@ -1030,30 +1037,48 @@ void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
static const size_t trace__entry_str_size = 2048;
-static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
+static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
- struct thread_trace *ttrace = thread__priv(thread);
+ if (fd > ttrace->files.max) {
+ struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
- if (fd > ttrace->paths.max) {
- char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
-
- if (npath == NULL)
- return -1;
+ if (nfiles == NULL)
+ return NULL;
- if (ttrace->paths.max != -1) {
- memset(npath + ttrace->paths.max + 1, 0,
- (fd - ttrace->paths.max) * sizeof(char *));
+ if (ttrace->files.max != -1) {
+ memset(nfiles + ttrace->files.max + 1, 0,
+ (fd - ttrace->files.max) * sizeof(struct file));
} else {
- memset(npath, 0, (fd + 1) * sizeof(char *));
+ memset(nfiles, 0, (fd + 1) * sizeof(struct file));
}
- ttrace->paths.table = npath;
- ttrace->paths.max = fd;
+ ttrace->files.table = nfiles;
+ ttrace->files.max = fd;
}
- ttrace->paths.table[fd] = strdup(pathname);
+ return ttrace->files.table + fd;
+}
- return ttrace->paths.table[fd] != NULL ? 0 : -1;
+struct file *thread__files_entry(struct thread *thread, int fd)
+{
+ return thread_trace__files_entry(thread__priv(thread), fd);
+}
+
+static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
+{
+ struct thread_trace *ttrace = thread__priv(thread);
+ struct file *file = thread_trace__files_entry(ttrace, fd);
+
+ if (file != NULL) {
+ struct stat st;
+ if (stat(pathname, &st) == 0)
+ file->dev_maj = major(st.st_rdev);
+ file->pathname = strdup(pathname);
+ if (file->pathname)
+ return 0;
+ }
+
+ return -1;
}
static int thread__read_fd_path(struct thread *thread, int fd)
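
trace__set_fd_pathname() is split: the table-growing part moves into thread_trace__files_entry(), which grows the fd-indexed array with realloc() and zeroes only the newly added slots, and the caller then fills in the entry, stat()ing the path for its device major before caching the strdup()ed name. A standalone sketch of that growth pattern, with illustrative names:

#include <stdlib.h>
#include <string.h>

struct entry {
	char *pathname;
	int dev_maj;
};

struct fd_table {
	int max;		/* highest valid index, -1 while empty */
	struct entry *slots;
};

static struct entry *fd_table_entry(struct fd_table *t, int fd)
{
	if (fd > t->max) {
		struct entry *n = realloc(t->slots, (fd + 1) * sizeof(*n));

		if (n == NULL)
			return NULL;

		if (t->max != -1)	/* zero only the tail past the old max */
			memset(n + t->max + 1, 0, (fd - t->max) * sizeof(*n));
		else			/* first allocation: zero everything */
			memset(n, 0, (fd + 1) * sizeof(*n));

		t->slots = n;
		t->max = fd;
	}

	return t->slots + fd;
}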
@@ -1093,7 +1118,7 @@ static const char *thread__fd_path(struct thread *thread, int fd,
if (fd < 0)
return NULL;
- if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
+ if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
if (!trace->live)
return NULL;
++trace->stats.proc_getname;
@@ -1101,7 +1126,7 @@ static const char *thread__fd_path(struct thread *thread, int fd,
return NULL;
}
- return ttrace->paths.table[fd];
+ return ttrace->files.table[fd].pathname;
}
size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
@@ -1140,8 +1165,8 @@ static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
struct thread_trace *ttrace = thread__priv(arg->thread);
- if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
- zfree(&ttrace->paths.table[fd]);
+ if (ttrace && fd >= 0 && fd <= ttrace->files.max)
+ zfree(&ttrace->files.table[fd].pathname);
return printed;
}
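
On a close(), the cached entry for that fd is dropped with zfree(), which frees the string and NULLs the pointer so thread__fd_path() will re-resolve the fd if the number gets reused; only the member name changes here. A minimal equivalent of that helper, assuming the usual free-and-NULL semantics of the tools' zfree():

#include <stdlib.h>

#define zfree_sketch(ptr) do { free(*(ptr)); *(ptr) = NULL; } while (0)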
@@ -1733,6 +1758,7 @@ static int trace__printf_interrupted_entry(struct trace *trace)
{
struct thread_trace *ttrace;
size_t printed;
+ int len;
if (trace->failure_only || trace->current == NULL)
return 0;
@@ -1743,9 +1769,14 @@ static int trace__printf_interrupted_entry(struct trace *trace)
return 0;
printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
- printed += fprintf(trace->output, ")%-*s ...\n", trace->args_alignment, ttrace->entry_str);
- ttrace->entry_pending = false;
+ printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
+
+ if (len < trace->args_alignment - 4)
+ printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
+
+ printed += fprintf(trace->output, " ...\n");
+ ttrace->entry_pending = false;
++trace->nr_events_printed;
return printed;
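
Interrupted entries are now padded the same way completed ones are: print the argument string, close the parenthesis, and pad with spaces only while the printed length is still short of args_alignment - 4, then append " ...". A standalone sketch of that padding arithmetic, with made-up values:

#include <stdio.h>

int main(void)
{
	int args_alignment = 40;	/* illustrative */
	const char *entry_str = "fd: 3, buf: 0x7ffd1c40, count: 256";
	int len = printf("%s)", entry_str);

	if (len < args_alignment - 4)
		printf("%-*s", args_alignment - 4 - len, " ");

	printf(" ...\n");
	return 0;
}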
@@ -1768,16 +1799,16 @@ static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
return printed;
}
-static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
+static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size)
{
void *augmented_args = NULL;
/*
* For now with BPF raw_augmented we hook into raw_syscalls:sys_enter
- * and there we get all 6 syscall args plus the tracepoint common
- * fields (sizeof(long)) and the syscall_nr (another long). So we check
- * if that is the case and if so don't look after the sc->args_size,
- * but always after the full raw_syscalls:sys_enter payload, which is
- * fixed.
+ * and there we get all 6 syscall args plus the tracepoint common fields
+ * that get calculated at the start, and the syscall_nr (another long).
+ * So we check if that is the case and if so don't look after the
+ * sc->args_size but always after the full raw_syscalls:sys_enter payload,
+ * which is fixed.
*
* We'll revisit this later to pass s->args_size to the BPF augmenter
* (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it
@@ -1785,7 +1816,7 @@ static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sam
* use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace
* traffic to just what is needed for each syscall.
*/
- int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
+ int args_size = raw_augmented_args_size ?: sc->args_size;
*augmented_args_size = sample->raw_size - args_size;
if (*augmented_args_size > 0)
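
The raw_augmented bool becomes an int carrying the precomputed fixed payload size, so this function no longer hardcodes 8 * sizeof(long): whatever lies past args_size in the raw sample is the payload the BPF augmenter appended (filename strings and the like). A self-contained sketch of the sizing; the sample numbers assume x86_64 and are illustrative:

#include <stdio.h>

/* bytes of BPF-appended payload following the syscall args, if any */
static int augmented_size(int raw_size, int sc_args_size, int raw_augmented_args_size)
{
	/* GNU ?: as in the patch: use the fixed raw_syscalls size when set */
	int args_size = raw_augmented_args_size ?: sc_args_size;

	return raw_size - args_size;
}

int main(void)
{
	/* raw_syscalls:sys_enter: 8 bytes of common fields, then the id and
	 * six args at 8 bytes each = 64; a 64 byte filename was appended */
	printf("%d augmented bytes\n", augmented_size(128, 48, 64));
	return 0;
}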
@@ -1839,7 +1870,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
* here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
*/
if (evsel != trace->syscalls.events.sys_enter)
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
@@ -1897,7 +1928,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size);
syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
fprintf(trace->output, "%s", msg);
err = 0;
@@ -2001,9 +2032,10 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (ttrace->entry_pending) {
printed = fprintf(trace->output, "%s", ttrace->entry_str);
} else {
- fprintf(trace->output, " ... [");
+ printed += fprintf(trace->output, " ... [");
color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
- fprintf(trace->output, "]: %s()", sc->name);
+ printed += 9;
+ printed += fprintf(trace->output, "]: %s()", sc->name);
}
printed++; /* the closing ')' */
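
The "continued" branch now also accumulates what it writes into printed, so the padding computed from printed further down stays correct; the bare printed += 9 stands in for the width of "continued", which is written through color_fprintf() without its return value being folded in. A slightly more self-documenting variant of that constant (not what the patch does):

#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t printed = 0;

	printed += strlen("continued");	/* the 9 columns emitted by color_fprintf() */
	printf("%zu\n", printed);
	return 0;
}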
@@ -2686,7 +2718,9 @@ static int trace__set_ev_qualifier_filter(struct trace *trace)
{
if (trace->syscalls.map)
return trace__set_ev_qualifier_bpf_filter(trace);
- return trace__set_ev_qualifier_tp_filter(trace);
+ if (trace->syscalls.events.sys_enter)
+ return trace__set_ev_qualifier_tp_filter(trace);
+ return 0;
}
static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
@@ -3812,13 +3846,6 @@ int cmd_trace(int argc, const char **argv)
* syscall.
*/
if (trace.syscalls.events.augmented) {
- evsel = trace.syscalls.events.augmented;
-
- if (perf_evsel__init_augmented_syscall_tp(evsel) ||
- perf_evsel__init_augmented_syscall_tp_args(evsel))
- goto out;
- evsel->handler = trace__sys_enter;
-
evlist__for_each_entry(trace.evlist, evsel) {
bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
@@ -3827,9 +3854,41 @@ int cmd_trace(int argc, const char **argv)
goto init_augmented_syscall_tp;
}
+ if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) {
+ struct perf_evsel *augmented = trace.syscalls.events.augmented;
+ if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
+ perf_evsel__init_augmented_syscall_tp_args(augmented))
+ goto out;
+ augmented->handler = trace__sys_enter;
+ }
+
if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+ struct syscall_tp *sc;
init_augmented_syscall_tp:
- perf_evsel__init_augmented_syscall_tp(evsel);
+ if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
+ goto out;
+ sc = evsel->priv;
+ /*
+ * For now with BPF raw_augmented we hook into
+ * raw_syscalls:sys_enter and there we get all
+ * 6 syscall args plus the tracepoint common
+ * fields and the syscall_nr (another long).
+ * So we check if that is the case and if so
+ * don't look after the sc->args_size but
+ * always after the full raw_syscalls:sys_enter
+ * payload, which is fixed.
+ *
+ * We'll revisit this later to pass
+ * s->args_size to the BPF augmenter (now
+ * tools/perf/examples/bpf/augmented_raw_syscalls.c,
+ * so that it copies only what we need for each
+ * syscall, like what happens when we use
+ * syscalls:sys_enter_NAME, so that we reduce
+ * the kernel/userspace traffic to just what is
+ * needed for each syscall.
+ */
+ if (trace.raw_augmented_syscalls)
+ trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
perf_evsel__init_augmented_syscall_tp_ret(evsel);
evsel->handler = trace__sys_exit;
}
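
With each syscalls:sys_exit_* evsel initialized against its own format, sc->id.offset gives the size of the common tracepoint fields, so the fixed raw_syscalls:sys_enter payload the BPF augmenter copies works out to that offset plus one long for the syscall number and six longs for the arguments. A standalone sketch of the arithmetic; the id_offset value is an x86_64 assumption, not taken from the patch:

#include <stdio.h>

int main(void)
{
	int id_offset = 8;	/* offset of "id" past the common fields (x86_64 assumption) */
	int args_size = (6 + 1) * sizeof(long) + id_offset;

	printf("raw augmented args size = %d\n", args_size);	/* 64 here */
	return 0;
}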