Diffstat (limited to 'tools')
38 files changed, 1810 insertions, 623 deletions
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h index b21b586b9854..1738c0391da4 100644 --- a/tools/include/linux/spinlock.h +++ b/tools/include/linux/spinlock.h @@ -6,8 +6,9 @@ #include <stdbool.h> #define spinlock_t pthread_mutex_t -#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER; +#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER #define __SPIN_LOCK_UNLOCKED(x) (pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER +#define spin_lock_init(x) pthread_mutex_init(x, NULL) #define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x) #define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5922443063f0..0f9f06df49bc 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2035,7 +2035,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, return -EINVAL; obj = bpf_object__open(attr->file); - if (IS_ERR(obj)) + if (IS_ERR_OR_NULL(obj)) return -ENOENT; bpf_object__for_each_program(prog, obj) { diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h index b3e32b010ab1..c2c01f84df75 100644 --- a/tools/objtool/arch/x86/include/asm/insn.h +++ b/tools/objtool/arch/x86/include/asm/insn.h @@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn) return insn_offset_displacement(insn) + insn->displacement.nbytes; } +#define POP_SS_OPCODE 0x1f +#define MOV_SREG_OPCODE 0x8e + +/* + * Intel SDM Vol.3A 6.8.3 states; + * "Any single-step trap that would be delivered following the MOV to SS + * instruction or POP to SS instruction (because EFLAGS.TF is 1) is + * suppressed." + * This function returns true if @insn is MOV SS or POP SS. On these + * instructions, single stepping is suppressed. 
+ */ +static inline int insn_masking_exception(struct insn *insn) +{ + return insn->opcode.bytes[0] == POP_SS_OPCODE || + (insn->opcode.bytes[0] == MOV_SREG_OPCODE && + X86_MODRM_REG(insn->modrm.bytes[0]) == 2); +} + #endif /* _ASM_X86_INSN_H */ diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 5409f6f6c48d..3a31b238f885 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -59,6 +59,31 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file, return next; } +static struct instruction *next_insn_same_func(struct objtool_file *file, + struct instruction *insn) +{ + struct instruction *next = list_next_entry(insn, list); + struct symbol *func = insn->func; + + if (!func) + return NULL; + + if (&next->list != &file->insn_list && next->func == func) + return next; + + /* Check if we're already in the subfunction: */ + if (func == func->cfunc) + return NULL; + + /* Move to the subfunction: */ + return find_insn(file, func->cfunc->sec, func->cfunc->offset); +} + +#define func_for_each_insn_all(file, func, insn) \ + for (insn = find_insn(file, func->sec, func->offset); \ + insn; \ + insn = next_insn_same_func(file, insn)) + #define func_for_each_insn(file, func, insn) \ for (insn = find_insn(file, func->sec, func->offset); \ insn && &insn->list != &file->insn_list && \ @@ -149,10 +174,14 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, if (!strcmp(func->name, global_noreturns[i])) return 1; - if (!func->sec) + if (!func->len) return 0; - func_for_each_insn(file, func, insn) { + insn = find_insn(file, func->sec, func->offset); + if (!insn->func) + return 0; + + func_for_each_insn_all(file, func, insn) { empty = false; if (insn->type == INSN_RETURN) @@ -167,35 +196,28 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, * case, the function's dead-end status depends on whether the target * of the sibling call returns. */ - func_for_each_insn(file, func, insn) { - if (insn->sec != func->sec || - insn->offset >= func->offset + func->len) - break; - + func_for_each_insn_all(file, func, insn) { if (insn->type == INSN_JUMP_UNCONDITIONAL) { struct instruction *dest = insn->jump_dest; - struct symbol *dest_func; if (!dest) /* sibling call to another file */ return 0; - if (dest->sec != func->sec || - dest->offset < func->offset || - dest->offset >= func->offset + func->len) { - /* local sibling call */ - dest_func = find_symbol_by_offset(dest->sec, - dest->offset); - if (!dest_func) - continue; + if (dest->func && dest->func->pfunc != insn->func->pfunc) { + /* local sibling call */ if (recursion == 5) { - WARN_FUNC("infinite recursion (objtool bug!)", - dest->sec, dest->offset); - return -1; + /* + * Infinite recursion: two functions + * have sibling calls to each other. + * This is a very rare case. It means + * they aren't dead ends. 
+ */ + return 0; } - return __dead_end_function(file, dest_func, + return __dead_end_function(file, dest->func, recursion + 1); } } @@ -422,7 +444,7 @@ static void add_ignores(struct objtool_file *file) if (!ignore_func(file, func)) continue; - func_for_each_insn(file, func, insn) + func_for_each_insn_all(file, func, insn) insn->ignore = true; } } @@ -782,30 +804,35 @@ out: return ret; } -static int add_switch_table(struct objtool_file *file, struct symbol *func, - struct instruction *insn, struct rela *table, - struct rela *next_table) +static int add_switch_table(struct objtool_file *file, struct instruction *insn, + struct rela *table, struct rela *next_table) { struct rela *rela = table; struct instruction *alt_insn; struct alternative *alt; + struct symbol *pfunc = insn->func->pfunc; + unsigned int prev_offset = 0; list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) { if (rela == next_table) break; - if (rela->sym->sec != insn->sec || - rela->addend <= func->offset || - rela->addend >= func->offset + func->len) + /* Make sure the switch table entries are consecutive: */ + if (prev_offset && rela->offset != prev_offset + 8) break; - alt_insn = find_insn(file, insn->sec, rela->addend); - if (!alt_insn) { - WARN("%s: can't find instruction at %s+0x%x", - file->rodata->rela->name, insn->sec->name, - rela->addend); - return -1; - } + /* Detect function pointers from contiguous objects: */ + if (rela->sym->sec == pfunc->sec && + rela->addend == pfunc->offset) + break; + + alt_insn = find_insn(file, rela->sym->sec, rela->addend); + if (!alt_insn) + break; + + /* Make sure the jmp dest is in the function or subfunction: */ + if (alt_insn->func->pfunc != pfunc) + break; alt = malloc(sizeof(*alt)); if (!alt) { @@ -815,6 +842,13 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func, alt->insn = alt_insn; list_add_tail(&alt->list, &insn->alts); + prev_offset = rela->offset; + } + + if (!prev_offset) { + WARN_FUNC("can't find switch jump table", + insn->sec, insn->offset); + return -1; } return 0; @@ -869,40 +903,21 @@ static struct rela *find_switch_table(struct objtool_file *file, { struct rela *text_rela, *rodata_rela; struct instruction *orig_insn = insn; + unsigned long table_offset; - text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); - if (text_rela && text_rela->sym == file->rodata->sym) { - /* case 1 */ - rodata_rela = find_rela_by_dest(file->rodata, - text_rela->addend); - if (rodata_rela) - return rodata_rela; - - /* case 2 */ - rodata_rela = find_rela_by_dest(file->rodata, - text_rela->addend + 4); - if (!rodata_rela) - return NULL; - - file->ignore_unreachables = true; - return rodata_rela; - } - - /* case 3 */ /* * Backward search using the @first_jump_src links, these help avoid * much of the 'in between' code. Which avoids us getting confused by * it. */ - for (insn = list_prev_entry(insn, list); - + for (; &insn->list != &file->insn_list && insn->sec == func->sec && insn->offset >= func->offset; insn = insn->first_jump_src ?: list_prev_entry(insn, list)) { - if (insn->type == INSN_JUMP_DYNAMIC) + if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) break; /* allow small jumps within the range */ @@ -918,18 +933,29 @@ static struct rela *find_switch_table(struct objtool_file *file, if (!text_rela || text_rela->sym != file->rodata->sym) continue; + table_offset = text_rela->addend; + if (text_rela->type == R_X86_64_PC32) + table_offset += 4; + /* * Make sure the .rodata address isn't associated with a * symbol. 
gcc jump tables are anonymous data. */ - if (find_symbol_containing(file->rodata, text_rela->addend)) + if (find_symbol_containing(file->rodata, table_offset)) continue; - rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend); - if (!rodata_rela) - continue; + rodata_rela = find_rela_by_dest(file->rodata, table_offset); + if (rodata_rela) { + /* + * Use of RIP-relative switch jumps is quite rare, and + * indicates a rare GCC quirk/bug which can leave dead + * code behind. + */ + if (text_rela->type == R_X86_64_PC32) + file->ignore_unreachables = true; - return rodata_rela; + return rodata_rela; + } } return NULL; @@ -943,7 +969,7 @@ static int add_func_switch_tables(struct objtool_file *file, struct rela *rela, *prev_rela = NULL; int ret; - func_for_each_insn(file, func, insn) { + func_for_each_insn_all(file, func, insn) { if (!last) last = insn; @@ -974,8 +1000,7 @@ static int add_func_switch_tables(struct objtool_file *file, * the beginning of another switch table in the same function. */ if (prev_jump) { - ret = add_switch_table(file, func, prev_jump, prev_rela, - rela); + ret = add_switch_table(file, prev_jump, prev_rela, rela); if (ret) return ret; } @@ -985,7 +1010,7 @@ static int add_func_switch_tables(struct objtool_file *file, } if (prev_jump) { - ret = add_switch_table(file, func, prev_jump, prev_rela, NULL); + ret = add_switch_table(file, prev_jump, prev_rela, NULL); if (ret) return ret; } @@ -1749,15 +1774,13 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, while (1) { next_insn = next_insn_same_sec(file, insn); - - if (file->c_file && func && insn->func && func != insn->func) { + if (file->c_file && func && insn->func && func != insn->func->pfunc) { WARN("%s() falls through to next function %s()", func->name, insn->func->name); return 1; } - if (insn->func) - func = insn->func; + func = insn->func ? 
insn->func->pfunc : NULL; if (func && insn->ignore) { WARN_FUNC("BUG: why am I validating an ignored function?", @@ -1778,7 +1801,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, i = insn; save_insn = NULL; - func_for_each_insn_continue_reverse(file, func, i) { + func_for_each_insn_continue_reverse(file, insn->func, i) { if (i->save) { save_insn = i; break; @@ -1865,7 +1888,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, case INSN_JUMP_UNCONDITIONAL: if (insn->jump_dest && (!func || !insn->jump_dest->func || - func == insn->jump_dest->func)) { + insn->jump_dest->func->pfunc == func)) { ret = validate_branch(file, insn->jump_dest, state); if (ret) @@ -2060,7 +2083,7 @@ static int validate_functions(struct objtool_file *file) for_each_sec(file, sec) { list_for_each_entry(func, &sec->symbol_list, list) { - if (func->type != STT_FUNC) + if (func->type != STT_FUNC || func->pfunc != func) continue; insn = find_insn(file, sec, func->offset); diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index c1c338661699..4e60e105583e 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -79,6 +79,19 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset) return NULL; } +struct symbol *find_symbol_by_name(struct elf *elf, const char *name) +{ + struct section *sec; + struct symbol *sym; + + list_for_each_entry(sec, &elf->sections, list) + list_for_each_entry(sym, &sec->symbol_list, list) + if (!strcmp(sym->name, name)) + return sym; + + return NULL; +} + struct symbol *find_symbol_containing(struct section *sec, unsigned long offset) { struct symbol *sym; @@ -203,10 +216,11 @@ static int read_sections(struct elf *elf) static int read_symbols(struct elf *elf) { - struct section *symtab; - struct symbol *sym; + struct section *symtab, *sec; + struct symbol *sym, *pfunc; struct list_head *entry, *tmp; int symbols_nr, i; + char *coldstr; symtab = find_section_by_name(elf, ".symtab"); if (!symtab) { @@ -281,6 +295,30 @@ static int read_symbols(struct elf *elf) hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx); } + /* Create parent/child links for any cold subfunctions */ + list_for_each_entry(sec, &elf->sections, list) { + list_for_each_entry(sym, &sec->symbol_list, list) { + if (sym->type != STT_FUNC) + continue; + sym->pfunc = sym->cfunc = sym; + coldstr = strstr(sym->name, ".cold."); + if (coldstr) { + coldstr[0] = '\0'; + pfunc = find_symbol_by_name(elf, sym->name); + coldstr[0] = '.'; + + if (!pfunc) { + WARN("%s(): can't find parent function", + sym->name); + goto err; + } + + sym->pfunc = pfunc; + pfunc->cfunc = sym; + } + } + } + return 0; err: diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index d86e2ff14466..de5cd2ddded9 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -61,6 +61,7 @@ struct symbol { unsigned char bind, type; unsigned long offset; unsigned int len; + struct symbol *pfunc, *cfunc; }; struct rela { @@ -86,6 +87,7 @@ struct elf { struct elf *elf_open(const char *name, int flags); struct section *find_section_by_name(struct elf *elf, const char *name); struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); +struct symbol *find_symbol_by_name(struct elf *elf, const char *name); struct symbol *find_symbol_containing(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, diff --git 
a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 016882dbbc16..ee86473643be 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -16,7 +16,7 @@ nm -g $libc 2>/dev/null | fgrep -q inet_pton || exit 254 trace_libc_inet_pton_backtrace() { idx=0 expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" - expected[1]=".*inet_pton[[:space:]]\($libc\)$" + expected[1]=".*inet_pton[[:space:]]\($libc|inlined\)$" case "$(uname -m)" in s390x) eventattr='call-graph=dwarf,max-stack=4' diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 536ee148bff8..5d74a30fe00f 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1263,6 +1263,9 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start max_percent = sample->percent; } + if (al->samples_nr > nr_percent) + nr_percent = al->samples_nr; + if (max_percent < min_pcnt) return -1; diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 40020b1ca54f..bf16dc9ee507 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -239,6 +239,7 @@ static void cs_etm__free(struct perf_session *session) for (i = 0; i < aux->num_cpu; i++) zfree(&aux->metadata[i]); + thread__zput(aux->unknown_thread); zfree(&aux->metadata); zfree(&aux); } @@ -612,8 +613,8 @@ cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq) return buff->len; } -static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm, - struct auxtrace_queue *queue) +static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm, + struct auxtrace_queue *queue) { struct cs_etm_queue *etmq = queue->priv; @@ -1357,6 +1358,23 @@ int cs_etm__process_auxtrace_info(union perf_event *event, etm->auxtrace.free = cs_etm__free; session->auxtrace = &etm->auxtrace; + etm->unknown_thread = thread__new(999999999, 999999999); + if (!etm->unknown_thread) + goto err_free_queues; + + /* + * Initialize list node so that at thread__zput() we can avoid + * segmentation fault at list_del_init(). 
+ */ + INIT_LIST_HEAD(&etm->unknown_thread->node); + + err = thread__set_comm(etm->unknown_thread, "unknown", 0); + if (err) + goto err_delete_thread; + + if (thread__init_map_groups(etm->unknown_thread, etm->machine)) + goto err_delete_thread; + if (dump_trace) { cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu); return 0; @@ -1371,16 +1389,18 @@ int cs_etm__process_auxtrace_info(union perf_event *event, err = cs_etm__synth_events(etm, session); if (err) - goto err_free_queues; + goto err_delete_thread; err = auxtrace_queues__process_index(&etm->queues, session); if (err) - goto err_free_queues; + goto err_delete_thread; etm->data_queued = etm->queues.populated; return 0; +err_delete_thread: + thread__zput(etm->unknown_thread); err_free_queues: auxtrace_queues__free(&etm->queues); session->auxtrace = NULL; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 2fb0272146d8..b8b8a9558d32 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1715,7 +1715,7 @@ int parse_events(struct perf_evlist *evlist, const char *str, struct perf_evsel *last; if (list_empty(&parse_state.list)) { - WARN_ONCE(true, "WARNING: event parser found nothing"); + WARN_ONCE(true, "WARNING: event parser found nothing\n"); return -1; } diff --git a/tools/power/pm-graph/bootgraph.py b/tools/power/pm-graph/bootgraph.py index abb4c38f029b..8ee626c0f6a5 100755 --- a/tools/power/pm-graph/bootgraph.py +++ b/tools/power/pm-graph/bootgraph.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python2 # # Tool for analyzing boot timing # Copyright (c) 2013, Intel Corporation. diff --git a/tools/power/pm-graph/sleepgraph.8 b/tools/power/pm-graph/sleepgraph.8 index 18baaf6300c9..070be2cf7f74 100644 --- a/tools/power/pm-graph/sleepgraph.8 +++ b/tools/power/pm-graph/sleepgraph.8 @@ -168,6 +168,7 @@ Create a summary page of all tests in \fIindir\fR. Creates summary.html in the current folder. The output page is a table of tests with suspend and resume values sorted by suspend mode, host, and kernel. Includes test averages by mode and links to the test html files. +Use -genhtml to include tests with missing html. .TP \fB-modes\fR List available suspend modes. @@ -179,6 +180,9 @@ with any options you intend to use to see if they will work. \fB-fpdt\fR Print out the contents of the ACPI Firmware Performance Data Table. .TP +\fB-battery\fR +Print out battery status and current charge. +.TP \fB-sysinfo\fR Print out system info extracted from BIOS. Reads /dev/mem directly instead of going through dmidecode. .TP diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py index 266409fb27ae..0c760478f7d7 100755 --- a/tools/power/pm-graph/sleepgraph.py +++ b/tools/power/pm-graph/sleepgraph.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python2 # # Tool for analyzing suspend/resume timing # Copyright (c) 2013, Intel Corporation. 
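The cs-etm.c changes above extend the existing error unwinding with a new err_delete_thread label, so that a failure in any step after the unknown thread is created releases it (via thread__zput) and then falls through to the older err_free_queues cleanup. A minimal standalone sketch of that layered-goto idiom — illustrative only, with malloc/free standing in for the perf helpers — is:

#include <stdlib.h>

struct ctx {
	void *queues;		/* stands in for etm->queues */
	void *unknown_thread;	/* stands in for etm->unknown_thread */
};

int ctx_setup(struct ctx *c, int later_step_fails)
{
	c->queues = malloc(64);
	if (!c->queues)
		return -1;

	c->unknown_thread = malloc(64);
	if (!c->unknown_thread)
		goto err_free_queues;

	if (later_step_fails)		/* e.g. synth_events() or process_index() failing */
		goto err_delete_thread;

	return 0;

err_delete_thread:
	free(c->unknown_thread);	/* plays the role of thread__zput() */
err_free_queues:
	free(c->queues);		/* plays the role of auxtrace_queues__free() */
	return -1;
}

int main(void)
{
	struct ctx c;

	return ctx_setup(&c, 1) ? 1 : 0;
}

Each label undoes exactly the resources acquired before the jump, and later labels fall through into earlier ones, so every failure path releases everything in reverse order of acquisition.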
@@ -69,7 +69,7 @@ from subprocess import call, Popen, PIPE # store system values and test parameters class SystemValues: title = 'SleepGraph' - version = '5.0' + version = '5.1' ansi = False rs = 0 display = 0 @@ -240,7 +240,7 @@ class SystemValues: kprobes = dict() timeformat = '%.3f' cmdline = '%s %s' % \ - (os.path.basename(sys.argv[0]), string.join(sys.argv[1:], ' ')) + (os.path.basename(sys.argv[0]), ' '.join(sys.argv[1:])) def __init__(self): self.archargs = 'args_'+platform.machine() self.hostname = platform.node() @@ -917,12 +917,18 @@ class Data: self.devicegroups.append([phase]) self.errorinfo = {'suspend':[],'resume':[]} def extractErrorInfo(self): + elist = { + 'HWERROR' : '.*\[ *Hardware Error *\].*', + 'FWBUG' : '.*\[ *Firmware Bug *\].*', + 'BUG' : '.*BUG.*', + 'ERROR' : '.*ERROR.*', + 'WARNING' : '.*WARNING.*', + 'IRQ' : '.*genirq: .*', + 'TASKFAIL': '.*Freezing of tasks failed.*', + } lf = sysvals.openlog(sysvals.dmesgfile, 'r') i = 0 list = [] - # sl = start line, et = error time, el = error line - type = 'ERROR' - sl = et = el = -1 for line in lf: i += 1 m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line) @@ -931,43 +937,13 @@ class Data: t = float(m.group('ktime')) if t < self.start or t > self.end: continue - if t < self.tSuspended: - dir = 'suspend' - else: - dir = 'resume' + dir = 'suspend' if t < self.tSuspended else 'resume' msg = m.group('msg') - if re.match('-*\[ *cut here *\]-*', msg): - type = 'WARNING' - sl = i - elif re.match('genirq: .*', msg): - type = 'IRQ' - sl = i - elif re.match('BUG: .*', msg) or re.match('kernel BUG .*', msg): - type = 'BUG' - sl = i - elif re.match('-*\[ *end trace .*\]-*', msg) or \ - re.match('R13: .*', msg): - if et >= 0 and sl >= 0: - list.append((type, dir, et, sl, i)) - self.kerror = True - sl = et = el = -1 - type = 'ERROR' - elif 'Call Trace:' in msg: - if el >= 0 and et >= 0: - list.append((type, dir, et, el, el)) + for err in elist: + if re.match(elist[err], msg): + list.append((err, dir, t, i, i)) self.kerror = True - et, el = t, i - if sl < 0 or type == 'BUG': - slval = i - if sl >= 0: - slval = sl - list.append((type, dir, et, slval, i)) - self.kerror = True - sl = et = el = -1 - type = 'ERROR' - if el >= 0 and et >= 0: - list.append((type, dir, et, el, el)) - self.kerror = True + break for e in list: type, dir, t, idx1, idx2 = e sysvals.vprint('kernel %s found in %s at %f' % (type, dir, t)) @@ -2331,12 +2307,14 @@ class TestProps: sv.suspendmode = data.stamp['mode'] if sv.suspendmode == 'command' and sv.ftracefile != '': modes = ['on', 'freeze', 'standby', 'mem', 'disk'] - out = Popen(['grep', 'machine_suspend', sv.ftracefile], - stderr=PIPE, stdout=PIPE).stdout.read() - m = re.match('.* machine_suspend\[(?P<mode>.*)\]', out) - if m and m.group('mode') in ['1', '2', '3', '4']: - sv.suspendmode = modes[int(m.group('mode'))] - data.stamp['mode'] = sv.suspendmode + fp = sysvals.openlog(sv.ftracefile, 'r') + for line in fp: + m = re.match('.* machine_suspend\[(?P<mode>.*)\]', line) + if m and m.group('mode') in ['1', '2', '3', '4']: + sv.suspendmode = modes[int(m.group('mode'))] + data.stamp['mode'] = sv.suspendmode + break + fp.close() m = re.match(self.cmdlinefmt, self.cmdline) if m: sv.cmdline = m.group('cmd') @@ -2413,7 +2391,7 @@ class ProcessMonitor: # markers, and/or kprobes required for primary parsing. 
def doesTraceLogHaveTraceEvents(): kpcheck = ['_cal: (', '_cpu_down()'] - techeck = sysvals.traceevents[:] + techeck = ['suspend_resume'] tmcheck = ['SUSPEND START', 'RESUME COMPLETE'] sysvals.usekprobes = False fp = sysvals.openlog(sysvals.ftracefile, 'r') @@ -2808,7 +2786,7 @@ def parseTraceLog(live=False): # -- phase changes -- # start of kernel suspend if(re.match('suspend_enter\[.*', t.name)): - if(isbegin): + if(isbegin and data.start == data.tKernSus): data.dmesg[phase]['start'] = t.time data.tKernSus = t.time continue @@ -3072,13 +3050,20 @@ def parseTraceLog(live=False): sysvals.vprint('Callgraph found for task %d: %.3fms, %s' % (cg.pid, (cg.end - cg.start)*1000, name)) cg.newActionFromFunction(data) if sysvals.suspendmode == 'command': - return testdata + return (testdata, '') # fill in any missing phases + error = [] for data in testdata: + tn = '' if len(testdata) == 1 else ('%d' % (data.testnumber + 1)) + terr = '' lp = data.phases[0] for p in data.phases: if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0): + if not terr: + print 'TEST%s FAILED: %s failed in %s phase' % (tn, sysvals.suspendmode, lp) + terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, lp) + error.append(terr) sysvals.vprint('WARNING: phase "%s" is missing!' % p) if(data.dmesg[p]['start'] < 0): data.dmesg[p]['start'] = data.dmesg[lp]['end'] @@ -3106,7 +3091,7 @@ def parseTraceLog(live=False): for j in range(i + 1, tc): testdata[j].mergeOverlapDevices(devlist) testdata[0].stitchTouchingThreads(testdata[1:]) - return testdata + return (testdata, ', '.join(error)) # Function: loadKernelLog # Description: @@ -3173,7 +3158,7 @@ def loadKernelLog(): if data: testruns.append(data) if len(testruns) < 1: - doError(' dmesg log has no suspend/resume data: %s' \ + print('ERROR: dmesg log has no suspend/resume data: %s' \ % sysvals.dmesgfile) # fix lines with same timestamp/function with the call and return swapped @@ -3521,68 +3506,144 @@ def createHTMLSummarySimple(testruns, htmlfile, folder): .summary {border:1px solid;}\n\ th {border: 1px solid black;background:#222;color:white;}\n\ td {font: 16px "Times New Roman";text-align: center;}\n\ - tr.alt td {background:#ddd;}\n\ - tr.avg td {background:#aaa;}\n\ + tr.head td {border: 1px solid black;background:#aaa;}\n\ + tr.alt {background-color:#ddd;}\n\ + tr.notice {color:red;}\n\ + .minval {background-color:#BBFFBB;}\n\ + .medval {background-color:#BBBBFF;}\n\ + .maxval {background-color:#FFBBBB;}\n\ + .head a {color:#000;text-decoration: none;}\n\ </style>\n</head>\n<body>\n' + # extract the test data into list + list = dict() + tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [[], []] + iMin, iMed, iMax = [0, 0], [0, 0], [0, 0] + num = 0 + lastmode = '' + cnt = {'pass':0, 'fail':0, 'hang':0} + for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'], v['time'])): + mode = data['mode'] + if mode not in list: + list[mode] = {'data': [], 'avg': [0,0], 'min': [0,0], 'max': [0,0], 'med': [0,0]} + if lastmode and lastmode != mode and num > 0: + for i in range(2): + s = sorted(tMed[i]) + list[lastmode]['med'][i] = s[int(len(s)/2)] + iMed[i] = tMed[i].index(list[lastmode]['med'][i]) + list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num] + list[lastmode]['min'] = tMin + list[lastmode]['max'] = tMax + list[lastmode]['idx'] = (iMin, iMed, iMax) + tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [[], []] + iMin, iMed, iMax = [0, 0], [0, 0], [0, 0] + num = 0 + tVal = [float(data['suspend']), float(data['resume'])] + 
list[mode]['data'].append([data['host'], data['kernel'], + data['time'], tVal[0], tVal[1], data['url'], data['result'], + data['issues']]) + idx = len(list[mode]['data']) - 1 + if data['result'] == 'pass': + cnt['pass'] += 1 + for i in range(2): + tMed[i].append(tVal[i]) + tAvg[i] += tVal[i] + if tMin[i] == 0 or tVal[i] < tMin[i]: + iMin[i] = idx + tMin[i] = tVal[i] + if tMax[i] == 0 or tVal[i] > tMax[i]: + iMax[i] = idx + tMax[i] = tVal[i] + num += 1 + elif data['result'] == 'hang': + cnt['hang'] += 1 + elif data['result'] == 'fail': + cnt['fail'] += 1 + lastmode = mode + if lastmode and num > 0: + for i in range(2): + s = sorted(tMed[i]) + list[lastmode]['med'][i] = s[int(len(s)/2)] + iMed[i] = tMed[i].index(list[lastmode]['med'][i]) + list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num] + list[lastmode]['min'] = tMin + list[lastmode]['max'] = tMax + list[lastmode]['idx'] = (iMin, iMed, iMax) + # group test header - html += '<div class="stamp">%s (%d tests)</div>\n' % (folder, len(testruns)) + desc = [] + for ilk in sorted(cnt, reverse=True): + if cnt[ilk] > 0: + desc.append('%d %s' % (cnt[ilk], ilk)) + html += '<div class="stamp">%s (%d tests: %s)</div>\n' % (folder, len(testruns), ', '.join(desc)) th = '\t<th>{0}</th>\n' td = '\t<td>{0}</td>\n' + tdh = '\t<td{1}>{0}</td>\n' tdlink = '\t<td><a href="{0}">html</a></td>\n' # table header html += '<table class="summary">\n<tr>\n' + th.format('#') +\ th.format('Mode') + th.format('Host') + th.format('Kernel') +\ - th.format('Test Time') + th.format('Suspend') + th.format('Resume') +\ - th.format('Detail') + '</tr>\n' - - # test data, 1 row per test - avg = '<tr class="avg"><td></td><td></td><td></td><td></td>'+\ - '<td>Average of {0} {1} tests</td><td>{2}</td><td>{3}</td><td></td></tr>\n' - sTimeAvg = rTimeAvg = 0.0 - mode = '' - num = 0 - for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'], v['time'])): - if mode != data['mode']: - # test average line - if(num > 0): - sTimeAvg /= (num - 1) - rTimeAvg /= (num - 1) - html += avg.format('%d' % (num - 1), mode, - '%3.3f ms' % sTimeAvg, '%3.3f ms' % rTimeAvg) - sTimeAvg = rTimeAvg = 0.0 - mode = data['mode'] - num = 1 - # alternate row color - if num % 2 == 1: - html += '<tr class="alt">\n' + th.format('Test Time') + th.format('Result') + th.format('Issues') +\ + th.format('Suspend') + th.format('Resume') + th.format('Detail') + '</tr>\n' + + # export list into html + head = '<tr class="head"><td>{0}</td><td>{1}</td>'+\ + '<td colspan=8 class="sus">Suspend Avg={2} '+\ + '<span class=minval><a href="#s{10}min">Min={3}</a></span> '+\ + '<span class=medval><a href="#s{10}med">Med={4}</a></span> '+\ + '<span class=maxval><a href="#s{10}max">Max={5}</a></span> '+\ + 'Resume Avg={6} '+\ + '<span class=minval><a href="#r{10}min">Min={7}</a></span> '+\ + '<span class=medval><a href="#r{10}med">Med={8}</a></span> '+\ + '<span class=maxval><a href="#r{10}max">Max={9}</a></span></td>'+\ + '</tr>\n' + headnone = '<tr class="head"><td>{0}</td><td>{1}</td><td colspan=8></td></tr>\n' + for mode in list: + # header line for each suspend mode + num = 0 + tAvg, tMin, tMax, tMed = list[mode]['avg'], list[mode]['min'],\ + list[mode]['max'], list[mode]['med'] + count = len(list[mode]['data']) + if 'idx' in list[mode]: + iMin, iMed, iMax = list[mode]['idx'] + html += head.format('%d' % count, mode.upper(), + '%.3f' % tAvg[0], '%.3f' % tMin[0], '%.3f' % tMed[0], '%.3f' % tMax[0], + '%.3f' % tAvg[1], '%.3f' % tMin[1], '%.3f' % tMed[1], '%.3f' % tMax[1], + mode.lower() + ) else: - html += 
'<tr>\n' - html += td.format("%d" % num) - num += 1 - # basic info - for item in ['mode', 'host', 'kernel', 'time']: - val = "unknown" - if(item in data): - val = data[item] - html += td.format(val) - # suspend time - sTime = float(data['suspend']) - sTimeAvg += sTime - html += td.format('%.3f ms' % sTime) - # resume time - rTime = float(data['resume']) - rTimeAvg += rTime - html += td.format('%.3f ms' % rTime) - # link to the output html - html += tdlink.format(data['url']) + '</tr>\n' - # last test average line - if(num > 0): - sTimeAvg /= (num - 1) - rTimeAvg /= (num - 1) - html += avg.format('%d' % (num - 1), mode, - '%3.3f ms' % sTimeAvg, '%3.3f ms' % rTimeAvg) + iMin = iMed = iMax = [-1, -1, -1] + html += headnone.format('%d' % count, mode.upper()) + for d in list[mode]['data']: + # row classes - alternate row color + rcls = ['alt'] if num % 2 == 1 else [] + if d[6] != 'pass': + rcls.append('notice') + html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n' + # figure out if the line has sus or res highlighted + idx = list[mode]['data'].index(d) + tHigh = ['', ''] + for i in range(2): + tag = 's%s' % mode if i == 0 else 'r%s' % mode + if idx == iMin[i]: + tHigh[i] = ' id="%smin" class=minval title="Minimum"' % tag + elif idx == iMax[i]: + tHigh[i] = ' id="%smax" class=maxval title="Maximum"' % tag + elif idx == iMed[i]: + tHigh[i] = ' id="%smed" class=medval title="Median"' % tag + html += td.format("%d" % (list[mode]['data'].index(d) + 1)) # row + html += td.format(mode) # mode + html += td.format(d[0]) # host + html += td.format(d[1]) # kernel + html += td.format(d[2]) # time + html += td.format(d[6]) # result + html += td.format(d[7]) # issues + html += tdh.format('%.3f ms' % d[3], tHigh[0]) if d[3] else td.format('') # suspend + html += tdh.format('%.3f ms' % d[4], tHigh[1]) if d[4] else td.format('') # resume + html += tdlink.format(d[5]) if d[5] else td.format('') # url + html += '</tr>\n' + num += 1 # flush the data to file hf = open(htmlfile, 'w') @@ -3607,7 +3668,7 @@ def ordinal(value): # testruns: array of Data objects from parseKernelLog or parseTraceLog # Output: # True if the html file was created, false if it failed -def createHTML(testruns): +def createHTML(testruns, testfail): if len(testruns) < 1: print('ERROR: Not enough test data to build a timeline') return @@ -3641,6 +3702,7 @@ def createHTML(testruns): '<td class="purple">{4}Firmware Resume: {2} ms</td>'\ '<td class="yellow" title="time from firmware mode to return from kernel enter_state({5}) [kernel time only]">{4}Kernel Resume: {3} ms</td>'\ '</tr>\n</table>\n' + html_fail = '<table class="testfail"><tr><td>{0}</td></tr></table>\n' # html format variables scaleH = 20 @@ -3708,6 +3770,9 @@ def createHTML(testruns): resume_time, testdesc, stitle, rtitle) devtl.html += thtml + if testfail: + devtl.html += html_fail.format(testfail) + # time scale for potentially multiple datasets t0 = testruns[0].start tMax = testruns[-1].end @@ -4006,6 +4071,7 @@ def addCSS(hf, sv, testcount=1, kerror=False, extra=''): .blue {background:rgba(169,208,245,0.4);}\n\ .time1 {font:22px Arial;border:1px solid;}\n\ .time2 {font:15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\ + .testfail {font:bold 22px Arial;color:red;border:1px dashed;}\n\ td {text-align:center;}\n\ r {color:#500000;font:15px Tahoma;}\n\ n {color:#505050;font:15px Tahoma;}\n\ @@ -4927,6 +4993,25 @@ def dmidecode(mempath, fatal=False): count += 1 return out +def getBattery(): + p = '/sys/class/power_supply' + 
bat = dict() + for d in os.listdir(p): + type = sysvals.getVal(os.path.join(p, d, 'type')).strip().lower() + if type != 'battery': + continue + for v in ['status', 'energy_now', 'capacity_now']: + bat[v] = sysvals.getVal(os.path.join(p, d, v)).strip().lower() + break + ac = True + if 'status' in bat and 'discharging' in bat['status']: + ac = False + charge = 0 + for v in ['energy_now', 'capacity_now']: + if v in bat and bat[v]: + charge = int(bat[v]) + return (ac, charge) + # Function: getFPDT # Description: # Read the acpi bios tables and pull out FPDT, the firmware data @@ -5202,8 +5287,9 @@ def getArgFloat(name, args, min, max, main=True): def processData(live=False): print('PROCESSING DATA') + error = '' if(sysvals.usetraceevents): - testruns = parseTraceLog(live) + testruns, error = parseTraceLog(live) if sysvals.dmesgfile: for data in testruns: data.extractErrorInfo() @@ -5220,15 +5306,18 @@ def processData(live=False): for data in testruns: data.debugPrint() sys.exit() - + if len(testruns) < 1: + return (testruns, {'error': 'timeline generation failed'}) sysvals.vprint('Creating the html timeline (%s)...' % sysvals.htmlfile) - createHTML(testruns) + createHTML(testruns, error) print('DONE') data = testruns[0] stamp = data.stamp stamp['suspend'], stamp['resume'] = data.getTimeValues() if data.fwValid: stamp['fwsuspend'], stamp['fwresume'] = data.fwSuspend, data.fwResume + if error: + stamp['error'] = error return (testruns, stamp) # Function: rerunTest @@ -5268,58 +5357,88 @@ def runTest(n=0): sysvals.sudouser(sysvals.testdir) sysvals.outputResult(stamp, n) -def find_in_html(html, strs, div=False): - for str in strs: - l = len(str) - i = html.find(str) - if i >= 0: +def find_in_html(html, start, end, firstonly=True): + n, out = 0, [] + while n < len(html): + m = re.search(start, html[n:]) + if not m: break - if i < 0: - return '' - if not div: - return re.search(r'[-+]?\d*\.\d+|\d+', html[i+l:i+l+50]).group() - n = html[i+l:].find('</div>') - if n < 0: + i = m.end() + m = re.search(end, html[n+i:]) + if not m: + break + j = m.start() + str = html[n+i:n+i+j] + if end == 'ms': + num = re.search(r'[-+]?\d*\.\d+|\d+', str) + str = num.group() if num else 'NaN' + if firstonly: + return str + out.append(str) + n += i+j + if firstonly: return '' - return html[i+l:i+l+n] + return out # Function: runSummary # Description: # create a summary of tests in a sub-directory -def runSummary(subdir, local=True): +def runSummary(subdir, local=True, genhtml=False): inpath = os.path.abspath(subdir) outpath = inpath if local: outpath = os.path.abspath('.') print('Generating a summary of folder "%s"' % inpath) + if genhtml: + for dirname, dirnames, filenames in os.walk(subdir): + sysvals.dmesgfile = sysvals.ftracefile = sysvals.htmlfile = '' + for filename in filenames: + if(re.match('.*_dmesg.txt', filename)): + sysvals.dmesgfile = os.path.join(dirname, filename) + elif(re.match('.*_ftrace.txt', filename)): + sysvals.ftracefile = os.path.join(dirname, filename) + sysvals.setOutputFile() + if sysvals.ftracefile and sysvals.htmlfile and \ + not os.path.exists(sysvals.htmlfile): + print('FTRACE: %s' % sysvals.ftracefile) + if sysvals.dmesgfile: + print('DMESG : %s' % sysvals.dmesgfile) + rerunTest() testruns = [] for dirname, dirnames, filenames in os.walk(subdir): for filename in filenames: if(not re.match('.*.html', filename)): continue file = os.path.join(dirname, filename) - html = open(file, 'r').read(10000) - suspend = find_in_html(html, - ['Kernel Suspend: ', 'Kernel Suspend Time: ']) - resume = 
find_in_html(html, - ['Kernel Resume: ', 'Kernel Resume Time: ']) - line = find_in_html(html, ['<div class="stamp">'], True) + html = open(file, 'r').read() + suspend = find_in_html(html, 'Kernel Suspend', 'ms') + resume = find_in_html(html, 'Kernel Resume', 'ms') + line = find_in_html(html, '<div class="stamp">', '</div>') stmp = line.split() - if not suspend or not resume or len(stmp) < 4: + if not suspend or not resume or len(stmp) != 8: continue + try: + dt = datetime.strptime(' '.join(stmp[3:]), '%B %d %Y, %I:%M:%S %p') + except: + continue + tstr = dt.strftime('%Y/%m/%d %H:%M:%S') + error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>') + result = 'fail' if error else 'pass' + ilist = [] + e = find_in_html(html, 'class="err"[\w=":;\.%\- ]*>', '→</div>', False) + for i in list(set(e)): + ilist.append('%sx%d' % (i, e.count(i)) if e.count(i) > 1 else i) data = { + 'mode': stmp[2], 'host': stmp[0], 'kernel': stmp[1], - 'mode': stmp[2], - 'time': string.join(stmp[3:], ' '), + 'time': tstr, + 'result': result, + 'issues': ','.join(ilist), 'suspend': suspend, 'resume': resume, 'url': os.path.relpath(file, outpath), } - if len(stmp) == 7: - data['kernel'] = 'unknown' - data['mode'] = stmp[1] - data['time'] = string.join(stmp[2:], ' ') testruns.append(data) outfile = os.path.join(outpath, 'summary.html') print('Summary file: %s' % outfile) @@ -5609,11 +5728,12 @@ def printHelp(): print(' -modes List available suspend modes') print(' -status Test to see if the system is enabled to run this tool') print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table') + print(' -battery Print out battery info (if available)') print(' -sysinfo Print out system info extracted from BIOS') print(' -devinfo Print out the pm settings of all devices which support runtime suspend') print(' -flist Print the list of functions currently being captured in ftrace') print(' -flistall Print all functions capable of being captured in ftrace') - print(' -summary directory Create a summary of all test in this dir') + print(' -summary dir Create a summary of tests in this dir [-genhtml builds missing html]') print(' [redo]') print(' -ftrace ftracefile Create HTML output using ftrace input (used with -dmesg)') print(' -dmesg dmesgfile Create HTML output using dmesg (used with -ftrace)') @@ -5623,8 +5743,9 @@ def printHelp(): # ----------------- MAIN -------------------- # exec start (skipped if script is loaded as library) if __name__ == '__main__': + genhtml = False cmd = '' - simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall', '-devinfo', '-status'] + simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall', '-devinfo', '-status', '-battery'] if '-f' in sys.argv: sysvals.cgskip = sysvals.configFile('cgskip.txt') # loop through the command line arguments @@ -5660,6 +5781,8 @@ if __name__ == '__main__': sysvals.skiphtml = True elif(arg == '-cgdump'): sysvals.cgdump = True + elif(arg == '-genhtml'): + genhtml = True elif(arg == '-addlogs'): sysvals.dmesglog = sysvals.ftracelog = True elif(arg == '-verbose'): @@ -5856,6 +5979,8 @@ if __name__ == '__main__': statusCheck(True) elif(cmd == 'fpdt'): getFPDT(True) + elif(cmd == 'battery'): + print 'AC Connect: %s\nCharge: %d' % getBattery() elif(cmd == 'sysinfo'): sysvals.printSystemInfo(True) elif(cmd == 'devinfo'): @@ -5867,7 +5992,7 @@ if __name__ == '__main__': elif(cmd == 'flistall'): sysvals.getFtraceFilterFunctions(False) elif(cmd == 'summary'): - runSummary(sysvals.outdir, True) + runSummary(sysvals.outdir, 
True, genhtml) sys.exit() # if instructed, re-analyze existing data files @@ -5920,7 +6045,7 @@ if __name__ == '__main__': print('TEST (%d/%d) COMPLETE' % (i+1, sysvals.multitest['count'])) sysvals.logmsg = '' if not sysvals.skiphtml: - runSummary(sysvals.outdir, False) + runSummary(sysvals.outdir, False, False) sysvals.sudouser(sysvals.outdir) else: if sysvals.outdir: diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py index 29f50d4cfea0..84e2b648e622 100755 --- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py +++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py @@ -28,6 +28,7 @@ import subprocess import os import time import re +import signal import sys import getopt import Gnuplot @@ -78,11 +79,12 @@ def print_help(): print(' Or') print(' ./intel_pstate_tracer.py [--cpu cpus] ---trace_file <trace_file> --name <test_name>') print(' To generate trace file, parse and plot, use (sudo required):') - print(' sudo ./intel_pstate_tracer.py [-c cpus] -i <interval> -n <test_name>') + print(' sudo ./intel_pstate_tracer.py [-c cpus] -i <interval> -n <test_name> -m <kbytes>') print(' Or') - print(' sudo ./intel_pstate_tracer.py [--cpu cpus] --interval <interval> --name <test_name>') + print(' sudo ./intel_pstate_tracer.py [--cpu cpus] --interval <interval> --name <test_name> --memory <kbytes>') print(' Optional argument:') - print(' cpus: comma separated list of CPUs') + print(' cpus: comma separated list of CPUs') + print(' kbytes: Kilo bytes of memory per CPU to allocate to the trace buffer. Default: 10240') print(' Output:') print(' If not already present, creates a "results/test_name" folder in the current working directory with:') print(' cpu.csv - comma seperated values file with trace contents and some additional calculations.') @@ -379,7 +381,7 @@ def clear_trace_file(): f_handle.close() except: print('IO error clearing trace file ') - quit() + sys.exit(2) def enable_trace(): """ Enable trace """ @@ -389,7 +391,7 @@ def enable_trace(): , 'w').write("1") except: print('IO error enabling trace ') - quit() + sys.exit(2) def disable_trace(): """ Disable trace """ @@ -399,17 +401,17 @@ def disable_trace(): , 'w').write("0") except: print('IO error disabling trace ') - quit() + sys.exit(2) def set_trace_buffer_size(): """ Set trace buffer size """ try: - open('/sys/kernel/debug/tracing/buffer_size_kb' - , 'w').write("10240") + with open('/sys/kernel/debug/tracing/buffer_size_kb', 'w') as fp: + fp.write(memory) except: - print('IO error setting trace buffer size ') - quit() + print('IO error setting trace buffer size ') + sys.exit(2) def free_trace_buffer(): """ Free the trace buffer memory """ @@ -418,8 +420,8 @@ def free_trace_buffer(): open('/sys/kernel/debug/tracing/buffer_size_kb' , 'w').write("1") except: - print('IO error setting trace buffer size ') - quit() + print('IO error freeing trace buffer ') + sys.exit(2) def read_trace_data(filename): """ Read and parse trace data """ @@ -431,7 +433,7 @@ def read_trace_data(filename): data = open(filename, 'r').read() except: print('Error opening ', filename) - quit() + sys.exit(2) for line in data.splitlines(): search_obj = \ @@ -489,10 +491,22 @@ def read_trace_data(filename): # Now seperate the main overall csv file into per CPU csv files. 
split_csv() +def signal_handler(signal, frame): + print(' SIGINT: Forcing cleanup before exit.') + if interval: + disable_trace() + clear_trace_file() + # Free the memory + free_trace_buffer() + sys.exit(0) + +signal.signal(signal.SIGINT, signal_handler) + interval = "" filename = "" cpu_list = "" testname = "" +memory = "10240" graph_data_present = False; valid1 = False @@ -501,7 +515,7 @@ valid2 = False cpu_mask = zeros((MAX_CPUS,), dtype=int) try: - opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:",["help","trace_file=","interval=","cpu=","name="]) + opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:m:",["help","trace_file=","interval=","cpu=","name=","memory="]) except getopt.GetoptError: print_help() sys.exit(2) @@ -521,6 +535,8 @@ for opt, arg in opts: elif opt in ("-n", "--name"): valid2 = True testname = arg + elif opt in ("-m", "--memory"): + memory = arg if not (valid1 and valid2): print_help() @@ -569,6 +585,11 @@ current_max_cpu = 0 read_trace_data(filename) +clear_trace_file() +# Free the memory +if interval: + free_trace_buffer() + if graph_data_present == False: print('No valid data to plot') sys.exit(2) @@ -593,9 +614,4 @@ for root, dirs, files in os.walk('.'): for f in files: fix_ownership(f) -clear_trace_file() -# Free the memory -if interval: - free_trace_buffer() - os.chdir('../../') diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile index a9bc914a8fe8..2ab25aa38263 100644 --- a/tools/power/x86/turbostat/Makefile +++ b/tools/power/x86/turbostat/Makefile @@ -25,4 +25,4 @@ install : turbostat install -d $(DESTDIR)$(PREFIX)/bin install $(BUILD_OUTPUT)/turbostat $(DESTDIR)$(PREFIX)/bin/turbostat install -d $(DESTDIR)$(PREFIX)/share/man/man8 - install turbostat.8 $(DESTDIR)$(PREFIX)/share/man/man8 + install -m 644 turbostat.8 $(DESTDIR)$(PREFIX)/share/man/man8 diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index ccf2a69365cc..ca9ef7017624 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -54,9 +54,12 @@ name as necessary to disambiguate it from others is necessary. Note that option .PP \fB--cpu cpu-set\fP limit output to system summary plus the specified cpu-set. If cpu-set is the string "core", then the system summary plus the first CPU in each core are printed -- eg. subsequent HT siblings are not printed. Or if cpu-set is the string "package", then the system summary plus the first CPU in each package is printed. Otherwise, the system summary plus the specified set of CPUs are printed. The cpu-set is ordered from low to high, comma delimited with ".." and "-" permitted to denote a range. eg. 1,2,8,14..17,21-44 .PP -\fB--hide column\fP do not show the specified columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group. +\fB--hide column\fP do not show the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group. .PP -\fB--show column\fP show only the specified columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group. +\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default. Currently the only built-in counters disabled by default are "usec" and "Time_Of_Day_Seconds". 
+The column name "all" can be used to enable all disabled-by-default built-in counters. +.PP +\fB--show column\fP show only the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group. .PP \fB--Dump\fP displays the raw counter values. .PP @@ -64,6 +67,8 @@ name as necessary to disambiguate it from others is necessary. Note that option .PP \fB--interval seconds\fP overrides the default 5.0 second measurement interval. .PP +\fB--num_iterations num\fP the number of measurement iterations. +.PP \fB--out output_file\fP turbostat output is written to the specified output_file. The file is truncated if it already exists, and it is created if it does not exist. .PP @@ -86,6 +91,8 @@ displays the statistics gathered since it was forked. The system configuration dump (if --quiet is not used) is followed by statistics. The first row of the statistics labels the content of each column (below). The second row of statistics is the system summary line. The system summary line has a '-' in the columns for the Package, Core, and CPU. The contents of the system summary line depends on the type of column. Columns that count items (eg. IRQ) show the sum across all CPUs in the system. Columns that show a percentage show the average across all CPUs in the system. Columns that dump raw MSR values simply show 0 in the summary. After the system summary row, each row describes a specific Package/Core/CPU. Note that if the --cpu parameter is used to limit which specific CPUs are displayed, turbostat will still collect statistics for all CPUs in the system and will still show the system summary for all CPUs in the system. .SH COLUMN DESCRIPTIONS .nf +\fBusec\fP For each CPU, the number of microseconds elapsed during counter collection, including thread migration -- if any. This counter is disabled by default, and is enabled with "--enable usec", or --debug. On the summary row, usec refers to the total elapsed time to collect the counters on all cpus. +\fBTime_Of_Day_Seconds\fP For each CPU, the gettimeofday(2) value (seconds.subsec since Epoch) when the counters ending the measurement interval were collected. This column is disabled by default, and can be enabled with "--enable Time_Of_Day_Seconds" or "--debug". On the summary row, Time_Of_Day_Seconds refers to the timestamp following collection of counters on the last CPU. \fBCore\fP processor core number. Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology (HT). \fBCPU\fP Linux CPU (logical processor) number. Yes, it is okay that on many systems the CPUs are not listed in numerical order -- for efficiency reasons, turbostat runs in topology order, so HT siblings appear together. \fBPackage\fP processor package number -- not present on systems with a single processor package. @@ -262,6 +269,21 @@ CPU PRF_CTRL .fi +.SH INPUT + +For interval-mode, turbostat will immediately end the current interval +when it sees a newline on standard input. +turbostat will then start the next interval. +Control-C will send a SIGINT to turbostat, +which will immediately abort the program with no further processing. +.SH SIGNALS + +SIGINT will interrupt interval-mode. +The end-of-interval data will be collected and displayed before turbostat exits. + +SIGUSR1 will end the current interval, +end-of-interval data will be collected and displayed before turbostat +starts a new interval. 
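The INPUT and SIGNALS sections above describe interval-mode ending an interval early on a newline from standard input or on SIGUSR1; the turbostat.c changes further down add <sys/select.h> and a struct timeval interval, which fits a timed select() on stdin. A rough, self-contained sketch of that idea — an assumption about the mechanism, not turbostat's actual loop — is:

#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static void on_sigusr1(int sig)
{
	(void)sig;	/* interrupting the select() below is all that is needed */
}

int main(void)
{
	struct timeval tv;
	fd_set rfds;
	int ret;

	signal(SIGUSR1, on_sigusr1);

	for (;;) {
		tv.tv_sec = 5;			/* default 5-second measurement interval */
		tv.tv_usec = 0;
		FD_ZERO(&rfds);
		FD_SET(STDIN_FILENO, &rfds);

		ret = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);
		if (ret > 0)
			getchar();		/* newline on stdin: end this interval now */
		/* ret == 0: interval timer expired; ret < 0: interrupted by SIGUSR1 */

		puts("collect and print end-of-interval counters here");
	}
}

SIGINT handling is deliberately omitted from the sketch.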
.SH NOTES .B "turbostat " diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bd9c6b31a504..d6cff3070ebd 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -29,6 +29,7 @@ #include <sys/types.h> #include <sys/wait.h> #include <sys/stat.h> +#include <sys/select.h> #include <sys/resource.h> #include <fcntl.h> #include <signal.h> @@ -47,9 +48,13 @@ char *proc_stat = "/proc/stat"; FILE *outf; int *fd_percpu; +struct timeval interval_tv = {5, 0}; struct timespec interval_ts = {5, 0}; +struct timespec one_msec = {0, 1000000}; +unsigned int num_iterations; unsigned int debug; unsigned int quiet; +unsigned int shown; unsigned int sums_need_wide_columns; unsigned int rapl_joules; unsigned int summary_only; @@ -58,6 +63,7 @@ unsigned int dump_only; unsigned int do_snb_cstates; unsigned int do_knl_cstates; unsigned int do_slm_cstates; +unsigned int do_cnl_cstates; unsigned int use_c1_residency_msr; unsigned int has_aperf; unsigned int has_epb; @@ -80,6 +86,8 @@ unsigned int do_rapl; unsigned int do_dts; unsigned int do_ptm; unsigned long long gfx_cur_rc6_ms; +unsigned long long cpuidle_cur_cpu_lpi_us; +unsigned long long cpuidle_cur_sys_lpi_us; unsigned int gfx_cur_mhz; unsigned int tcc_activation_temp; unsigned int tcc_activation_temp_override; @@ -87,6 +95,7 @@ double rapl_power_units, rapl_time_units; double rapl_dram_energy_units, rapl_energy_units; double rapl_joule_counter_range; unsigned int do_core_perf_limit_reasons; +unsigned int has_automatic_cstate_conversion; unsigned int do_gfx_perf_limit_reasons; unsigned int do_ring_perf_limit_reasons; unsigned int crystal_hz; @@ -147,7 +156,9 @@ char *progname; #define CPU_SUBSET_MAXCPUS 1024 /* need to use before probe... 
*/ cpu_set_t *cpu_present_set, *cpu_affinity_set, *cpu_subset; size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size; -#define MAX_ADDED_COUNTERS 16 +#define MAX_ADDED_COUNTERS 8 +#define MAX_ADDED_THREAD_COUNTERS 24 +#define BITMASK_SIZE 32 struct thread_data { struct timeval tv_begin; @@ -162,7 +173,7 @@ struct thread_data { unsigned int flags; #define CPU_IS_FIRST_THREAD_IN_CORE 0x2 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4 - unsigned long long counter[MAX_ADDED_COUNTERS]; + unsigned long long counter[MAX_ADDED_THREAD_COUNTERS]; } *thread_even, *thread_odd; struct core_data { @@ -183,6 +194,8 @@ struct pkg_data { unsigned long long pc8; unsigned long long pc9; unsigned long long pc10; + unsigned long long cpu_lpi; + unsigned long long sys_lpi; unsigned long long pkg_wtd_core_c0; unsigned long long pkg_any_core_c0; unsigned long long pkg_any_gfxe_c0; @@ -203,12 +216,21 @@ struct pkg_data { #define ODD_COUNTERS thread_odd, core_odd, package_odd #define EVEN_COUNTERS thread_even, core_even, package_even -#define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \ - (thread_base + (pkg_no) * topo.num_cores_per_pkg * \ - topo.num_threads_per_core + \ - (core_no) * topo.num_threads_per_core + (thread_no)) -#define GET_CORE(core_base, core_no, pkg_no) \ - (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no)) +#define GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no) \ + ((thread_base) + \ + ((pkg_no) * \ + topo.nodes_per_pkg * topo.cores_per_node * topo.threads_per_core) + \ + ((node_no) * topo.cores_per_node * topo.threads_per_core) + \ + ((core_no) * topo.threads_per_core) + \ + (thread_no)) + +#define GET_CORE(core_base, core_no, node_no, pkg_no) \ + ((core_base) + \ + ((pkg_no) * topo.nodes_per_pkg * topo.cores_per_node) + \ + ((node_no) * topo.cores_per_node) + \ + (core_no)) + + #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) enum counter_scope {SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE}; @@ -244,14 +266,25 @@ struct system_summary { struct pkg_data packages; } average; +struct cpu_topology { + int physical_package_id; + int logical_cpu_id; + int physical_node_id; + int logical_node_id; /* 0-based count within the package */ + int physical_core_id; + int thread_id; + cpu_set_t *put_ids; /* Processing Unit/Thread IDs */ +} *cpus; struct topo_params { int num_packages; int num_cpus; int num_cores; int max_cpu_num; - int num_cores_per_pkg; - int num_threads_per_core; + int max_node_num; + int nodes_per_pkg; + int cores_per_node; + int threads_per_core; } topo; struct timeval tv_even, tv_odd, tv_delta; @@ -273,27 +306,33 @@ int cpu_is_not_present(int cpu) int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *), struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) { - int retval, pkg_no, core_no, thread_no; + int retval, pkg_no, core_no, thread_no, node_no; for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { - for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { - for (thread_no = 0; thread_no < - topo.num_threads_per_core; ++thread_no) { - struct thread_data *t; - struct core_data *c; - struct pkg_data *p; - - t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); - - if (cpu_is_not_present(t->cpu_id)) - continue; - - c = GET_CORE(core_base, core_no, pkg_no); - p = GET_PKG(pkg_base, pkg_no); - - retval = func(t, c, p); - if (retval) - return retval; + for (core_no = 0; core_no < topo.cores_per_node; ++core_no) { + for (node_no = 0; node_no < topo.nodes_per_pkg; + node_no++) 
{ + for (thread_no = 0; thread_no < + topo.threads_per_core; ++thread_no) { + struct thread_data *t; + struct core_data *c; + struct pkg_data *p; + + t = GET_THREAD(thread_base, thread_no, + core_no, node_no, + pkg_no); + + if (cpu_is_not_present(t->cpu_id)) + continue; + + c = GET_CORE(core_base, core_no, + node_no, pkg_no); + p = GET_PKG(pkg_base, pkg_no); + + retval = func(t, c, p); + if (retval) + return retval; + } } } } @@ -346,6 +385,8 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr) * Thus, strings that are proper sub-sets must follow their more specific peers. */ struct msr_counter bic[] = { + { 0x0, "usec" }, + { 0x0, "Time_Of_Day_Seconds" }, { 0x0, "Package" }, { 0x0, "Avg_MHz" }, { 0x0, "Bzy_MHz" }, @@ -369,7 +410,9 @@ struct msr_counter bic[] = { { 0x0, "Pkg%pc7" }, { 0x0, "Pkg%pc8" }, { 0x0, "Pkg%pc9" }, - { 0x0, "Pkg%pc10" }, + { 0x0, "Pk%pc10" }, + { 0x0, "CPU%LPI" }, + { 0x0, "SYS%LPI" }, { 0x0, "PkgWatt" }, { 0x0, "CorWatt" }, { 0x0, "GFXWatt" }, @@ -389,62 +432,72 @@ struct msr_counter bic[] = { { 0x0, "Any%C0" }, { 0x0, "GFX%C0" }, { 0x0, "CPUGFX%" }, + { 0x0, "Node%" }, }; #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) -#define BIC_Package (1ULL << 0) -#define BIC_Avg_MHz (1ULL << 1) -#define BIC_Bzy_MHz (1ULL << 2) -#define BIC_TSC_MHz (1ULL << 3) -#define BIC_IRQ (1ULL << 4) -#define BIC_SMI (1ULL << 5) -#define BIC_Busy (1ULL << 6) -#define BIC_CPU_c1 (1ULL << 7) -#define BIC_CPU_c3 (1ULL << 8) -#define BIC_CPU_c6 (1ULL << 9) -#define BIC_CPU_c7 (1ULL << 10) -#define BIC_ThreadC (1ULL << 11) -#define BIC_CoreTmp (1ULL << 12) -#define BIC_CoreCnt (1ULL << 13) -#define BIC_PkgTmp (1ULL << 14) -#define BIC_GFX_rc6 (1ULL << 15) -#define BIC_GFXMHz (1ULL << 16) -#define BIC_Pkgpc2 (1ULL << 17) -#define BIC_Pkgpc3 (1ULL << 18) -#define BIC_Pkgpc6 (1ULL << 19) -#define BIC_Pkgpc7 (1ULL << 20) -#define BIC_Pkgpc8 (1ULL << 21) -#define BIC_Pkgpc9 (1ULL << 22) -#define BIC_Pkgpc10 (1ULL << 23) -#define BIC_PkgWatt (1ULL << 24) -#define BIC_CorWatt (1ULL << 25) -#define BIC_GFXWatt (1ULL << 26) -#define BIC_PkgCnt (1ULL << 27) -#define BIC_RAMWatt (1ULL << 28) -#define BIC_PKG__ (1ULL << 29) -#define BIC_RAM__ (1ULL << 30) -#define BIC_Pkg_J (1ULL << 31) -#define BIC_Cor_J (1ULL << 32) -#define BIC_GFX_J (1ULL << 33) -#define BIC_RAM_J (1ULL << 34) -#define BIC_Core (1ULL << 35) -#define BIC_CPU (1ULL << 36) -#define BIC_Mod_c6 (1ULL << 37) -#define BIC_sysfs (1ULL << 38) -#define BIC_Totl_c0 (1ULL << 39) -#define BIC_Any_c0 (1ULL << 40) -#define BIC_GFX_c0 (1ULL << 41) -#define BIC_CPUGFX (1ULL << 42) - -unsigned long long bic_enabled = 0xFFFFFFFFFFFFFFFFULL; -unsigned long long bic_present = BIC_sysfs; +#define BIC_USEC (1ULL << 0) +#define BIC_TOD (1ULL << 1) +#define BIC_Package (1ULL << 2) +#define BIC_Avg_MHz (1ULL << 3) +#define BIC_Bzy_MHz (1ULL << 4) +#define BIC_TSC_MHz (1ULL << 5) +#define BIC_IRQ (1ULL << 6) +#define BIC_SMI (1ULL << 7) +#define BIC_Busy (1ULL << 8) +#define BIC_CPU_c1 (1ULL << 9) +#define BIC_CPU_c3 (1ULL << 10) +#define BIC_CPU_c6 (1ULL << 11) +#define BIC_CPU_c7 (1ULL << 12) +#define BIC_ThreadC (1ULL << 13) +#define BIC_CoreTmp (1ULL << 14) +#define BIC_CoreCnt (1ULL << 15) +#define BIC_PkgTmp (1ULL << 16) +#define BIC_GFX_rc6 (1ULL << 17) +#define BIC_GFXMHz (1ULL << 18) +#define BIC_Pkgpc2 (1ULL << 19) +#define BIC_Pkgpc3 (1ULL << 20) +#define BIC_Pkgpc6 (1ULL << 21) +#define BIC_Pkgpc7 (1ULL << 22) +#define BIC_Pkgpc8 (1ULL << 23) +#define BIC_Pkgpc9 (1ULL << 24) +#define BIC_Pkgpc10 (1ULL << 25) +#define 
BIC_CPU_LPI (1ULL << 26) +#define BIC_SYS_LPI (1ULL << 27) +#define BIC_PkgWatt (1ULL << 26) +#define BIC_CorWatt (1ULL << 27) +#define BIC_GFXWatt (1ULL << 28) +#define BIC_PkgCnt (1ULL << 29) +#define BIC_RAMWatt (1ULL << 30) +#define BIC_PKG__ (1ULL << 31) +#define BIC_RAM__ (1ULL << 32) +#define BIC_Pkg_J (1ULL << 33) +#define BIC_Cor_J (1ULL << 34) +#define BIC_GFX_J (1ULL << 35) +#define BIC_RAM_J (1ULL << 36) +#define BIC_Core (1ULL << 37) +#define BIC_CPU (1ULL << 38) +#define BIC_Mod_c6 (1ULL << 39) +#define BIC_sysfs (1ULL << 40) +#define BIC_Totl_c0 (1ULL << 41) +#define BIC_Any_c0 (1ULL << 42) +#define BIC_GFX_c0 (1ULL << 43) +#define BIC_CPUGFX (1ULL << 44) +#define BIC_Node (1ULL << 45) + +#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD) + +unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT); +unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs; #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) +#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) + #define MAX_DEFERRED 16 char *deferred_skip_names[MAX_DEFERRED]; int deferred_skip_index; @@ -469,9 +522,10 @@ void help(void) "--cpu cpu-set limit output to summary plus cpu-set:\n" " {core | package | j,k,l..m,n-p }\n" "--quiet skip decoding system configuration header\n" - "--interval sec Override default 5-second measurement interval\n" + "--interval sec.subsec Override default 5-second measurement interval\n" "--help print this help message\n" "--list list column headers only\n" + "--num_iterations num number of the measurement iterations\n" "--out file create or truncate \"file\" for all output\n" "--version print version information\n" "\n" @@ -496,6 +550,9 @@ unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode) if (comma) *comma = '\0'; + if (!strcmp(name_list, "all")) + return ~0; + for (i = 0; i < MAX_BIC; ++i) { if (!strcmp(name_list, bic[i].name)) { retval |= (1ULL << i); @@ -532,10 +589,14 @@ void print_header(char *delim) struct msr_counter *mp; int printed = 0; - if (debug) - outp += sprintf(outp, "usec %s", delim); + if (DO_BIC(BIC_USEC)) + outp += sprintf(outp, "%susec", (printed++ ? delim : "")); + if (DO_BIC(BIC_TOD)) + outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : "")); if (DO_BIC(BIC_Package)) outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); + if (DO_BIC(BIC_Node)) + outp += sprintf(outp, "%sNode", (printed++ ? delim : "")); if (DO_BIC(BIC_Core)) outp += sprintf(outp, "%sCore", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU)) @@ -576,7 +637,7 @@ void print_header(char *delim) if (DO_BIC(BIC_CPU_c1)) outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : "")); - if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) + if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU_c6)) outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : "")); @@ -635,6 +696,10 @@ void print_header(char *delim) outp += sprintf(outp, "%sPkg%%pc9", (printed++ ? delim : "")); if (DO_BIC(BIC_Pkgpc10)) outp += sprintf(outp, "%sPk%%pc10", (printed++ ? delim : "")); + if (DO_BIC(BIC_CPU_LPI)) + outp += sprintf(outp, "%sCPU%%LPI", (printed++ ? delim : "")); + if (DO_BIC(BIC_SYS_LPI)) + outp += sprintf(outp, "%sSYS%%LPI", (printed++ ? 
delim : "")); if (do_rapl && !rapl_joules) { if (DO_BIC(BIC_PkgWatt)) @@ -739,6 +804,9 @@ int dump_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "pc8: %016llX\n", p->pc8); outp += sprintf(outp, "pc9: %016llX\n", p->pc9); outp += sprintf(outp, "pc10: %016llX\n", p->pc10); + outp += sprintf(outp, "pc10: %016llX\n", p->pc10); + outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi); + outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi); outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); outp += sprintf(outp, "Joules COR: %0X\n", p->energy_cores); outp += sprintf(outp, "Joules GFX: %0X\n", p->energy_gfx); @@ -786,7 +854,7 @@ int format_counters(struct thread_data *t, struct core_data *c, (cpu_subset && !CPU_ISSET_S(t->cpu_id, cpu_subset_size, cpu_subset))) return 0; - if (debug) { + if (DO_BIC(BIC_USEC)) { /* on each row, print how many usec each timestamp took to gather */ struct timeval tv; @@ -794,6 +862,10 @@ int format_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "%5ld\t", tv.tv_sec * 1000000 + tv.tv_usec); } + /* Time_Of_Day_Seconds: on each row, print sec.usec last timestamp taken */ + if (DO_BIC(BIC_TOD)) + outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec); + interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; tsc = t->tsc * tsc_tweak; @@ -802,6 +874,8 @@ int format_counters(struct thread_data *t, struct core_data *c, if (t == &average.threads) { if (DO_BIC(BIC_Package)) outp += sprintf(outp, "%s-", (printed++ ? delim : "")); + if (DO_BIC(BIC_Node)) + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); if (DO_BIC(BIC_Core)) outp += sprintf(outp, "%s-", (printed++ ? delim : "")); if (DO_BIC(BIC_CPU)) @@ -813,6 +887,15 @@ int format_counters(struct thread_data *t, struct core_data *c, else outp += sprintf(outp, "%s-", (printed++ ? delim : "")); } + if (DO_BIC(BIC_Node)) { + if (t) + outp += sprintf(outp, "%s%d", + (printed++ ? delim : ""), + cpus[t->cpu_id].physical_node_id); + else + outp += sprintf(outp, "%s-", + (printed++ ? delim : "")); + } if (DO_BIC(BIC_Core)) { if (c) outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_id); @@ -882,7 +965,7 @@ int format_counters(struct thread_data *t, struct core_data *c, if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; - if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) + if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc); if (DO_BIC(BIC_CPU_c6)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc); @@ -959,6 +1042,11 @@ int format_counters(struct thread_data *t, struct core_data *c, if (DO_BIC(BIC_Pkgpc10)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10/tsc); + if (DO_BIC(BIC_CPU_LPI)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->cpu_lpi / 1000000.0 / interval_float); + if (DO_BIC(BIC_SYS_LPI)) + outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); + /* * If measurement interval exceeds minimum RAPL Joule Counter range, * indicate that results are suspect by printing "**" in fraction place. 
@@ -1006,7 +1094,8 @@ int format_counters(struct thread_data *t, struct core_data *c, } done: - outp += sprintf(outp, "\n"); + if (*(outp - 1) != '\n') + outp += sprintf(outp, "\n"); return 0; } @@ -1083,6 +1172,8 @@ delta_package(struct pkg_data *new, struct pkg_data *old) old->pc8 = new->pc8 - old->pc8; old->pc9 = new->pc9 - old->pc9; old->pc10 = new->pc10 - old->pc10; + old->cpu_lpi = new->cpu_lpi - old->cpu_lpi; + old->sys_lpi = new->sys_lpi - old->sys_lpi; old->pkg_temp_c = new->pkg_temp_c; /* flag an error when rc6 counter resets/wraps */ @@ -1140,6 +1231,15 @@ delta_thread(struct thread_data *new, struct thread_data *old, int i; struct msr_counter *mp; + /* + * the timestamps from start of measurement interval are in "old" + * the timestamp from end of measurement interval are in "new" + * over-write old w/ new so we can print end of interval values + */ + + old->tv_begin = new->tv_begin; + old->tv_end = new->tv_end; + old->tsc = new->tsc - old->tsc; /* check for TSC < 1 Mcycles over interval */ @@ -1228,6 +1328,11 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data int i; struct msr_counter *mp; + t->tv_begin.tv_sec = 0; + t->tv_begin.tv_usec = 0; + t->tv_end.tv_sec = 0; + t->tv_end.tv_usec = 0; + t->tsc = 0; t->aperf = 0; t->mperf = 0; @@ -1260,6 +1365,8 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data p->pc8 = 0; p->pc9 = 0; p->pc10 = 0; + p->cpu_lpi = 0; + p->sys_lpi = 0; p->energy_pkg = 0; p->energy_dram = 0; @@ -1286,6 +1393,13 @@ int sum_counters(struct thread_data *t, struct core_data *c, int i; struct msr_counter *mp; + /* remember first tv_begin */ + if (average.threads.tv_begin.tv_sec == 0) + average.threads.tv_begin = t->tv_begin; + + /* remember last tv_end */ + average.threads.tv_end = t->tv_end; + average.threads.tsc += t->tsc; average.threads.aperf += t->aperf; average.threads.mperf += t->mperf; @@ -1341,6 +1455,9 @@ int sum_counters(struct thread_data *t, struct core_data *c, average.packages.pc9 += p->pc9; average.packages.pc10 += p->pc10; + average.packages.cpu_lpi = p->cpu_lpi; + average.packages.sys_lpi = p->sys_lpi; + average.packages.energy_pkg += p->energy_pkg; average.packages.energy_dram += p->energy_dram; average.packages.energy_cores += p->energy_cores; @@ -1487,7 +1604,7 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp) if (get_msr(cpu, mp->msr_num, counterp)) return -1; } else { - char path[128]; + char path[128 + PATH_BYTES]; if (mp->flags & SYSFS_PERCPU) { sprintf(path, "/sys/devices/system/cpu/cpu%d/%s", @@ -1603,7 +1720,7 @@ retry: if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; - if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates) { + if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; } @@ -1684,6 +1801,11 @@ retry: if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10)) return -13; + if (DO_BIC(BIC_CPU_LPI)) + p->cpu_lpi = cpuidle_cur_cpu_lpi_us; + if (DO_BIC(BIC_SYS_LPI)) + p->sys_lpi = cpuidle_cur_sys_lpi_us; + if (do_rapl & RAPL_PKG) { if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr)) return -13; @@ -1769,7 +1891,7 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, int amt_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, 
PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; -int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; +int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; static void @@ -2071,12 +2193,9 @@ dump_nhm_cst_cfg(void) get_msr(base_cpu, MSR_PKG_CST_CONFIG_CONTROL, &msr); -#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) -#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) - fprintf(outf, "cpu%d: MSR_PKG_CST_CONFIG_CONTROL: 0x%08llx", base_cpu, msr); - fprintf(outf, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n", + fprintf(outf, " (%s%s%s%s%slocked, pkg-cstate-limit=%d (%s)", (msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "", (msr & SNB_C1_AUTO_UNDEMOTE) ? "UNdemote-C1, " : "", (msr & NHM_C3_AUTO_DEMOTE) ? "demote-C3, " : "", @@ -2084,6 +2203,15 @@ dump_nhm_cst_cfg(void) (msr & (1 << 15)) ? "" : "UN", (unsigned int)msr & 0xF, pkg_cstate_limit_strings[pkg_cstate_limit]); + +#define AUTOMATIC_CSTATE_CONVERSION (1UL << 16) + if (has_automatic_cstate_conversion) { + fprintf(outf, ", automatic c-state conversion=%s", + (msr & AUTOMATIC_CSTATE_CONVERSION) ? "on" : "off"); + } + + fprintf(outf, ")\n"); + return; } @@ -2184,6 +2312,8 @@ void free_fd_percpu(void) void free_all_buffers(void) { + int i; + CPU_FREE(cpu_present_set); cpu_present_set = NULL; cpu_present_setsize = 0; @@ -2216,6 +2346,12 @@ void free_all_buffers(void) free(irq_column_2_cpu); free(irqs_per_cpu); + + for (i = 0; i <= topo.max_cpu_num; ++i) { + if (cpus[i].put_ids) + CPU_FREE(cpus[i].put_ids); + } + free(cpus); } @@ -2240,44 +2376,6 @@ int parse_int_file(const char *fmt, ...) 
} /* - * get_cpu_position_in_core(cpu) - * return the position of the CPU among its HT siblings in the core - * return -1 if the sibling is not in list - */ -int get_cpu_position_in_core(int cpu) -{ - char path[64]; - FILE *filep; - int this_cpu; - char character; - int i; - - sprintf(path, - "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", - cpu); - filep = fopen(path, "r"); - if (filep == NULL) { - perror(path); - exit(1); - } - - for (i = 0; i < topo.num_threads_per_core; i++) { - fscanf(filep, "%d", &this_cpu); - if (this_cpu == cpu) { - fclose(filep); - return i; - } - - /* Account for no separator after last thread*/ - if (i != (topo.num_threads_per_core - 1)) - fscanf(filep, "%c", &character); - } - - fclose(filep); - return -1; -} - -/* * cpu_is_first_core_in_package(cpu) * return 1 if given CPU is 1st core in package */ @@ -2296,35 +2394,115 @@ int get_core_id(int cpu) return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); } -int get_num_ht_siblings(int cpu) +void set_node_data(void) { char path[80]; FILE *filep; - int sib1; - int matches = 0; - char character; - char str[100]; - char *ch; + int pkg, node, cpu; - sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); - filep = fopen_or_die(path, "r"); + struct pkg_node_info { + int count; + int min; + } *pni; - /* - * file format: - * A ',' separated or '-' separated set of numbers - * (eg 1-2 or 1,3,4,5) - */ - fscanf(filep, "%d%c\n", &sib1, &character); - fseek(filep, 0, SEEK_SET); - fgets(str, 100, filep); - ch = strchr(str, character); - while (ch != NULL) { - matches++; - ch = strchr(ch+1, character); + pni = calloc(topo.num_packages, sizeof(struct pkg_node_info)); + if (!pni) + err(1, "calloc pkg_node_count"); + + for (pkg = 0; pkg < topo.num_packages; pkg++) + pni[pkg].min = topo.num_cpus; + + for (node = 0; node <= topo.max_node_num; node++) { + /* find the "first" cpu in the node */ + sprintf(path, "/sys/bus/node/devices/node%d/cpulist", node); + filep = fopen(path, "r"); + if (!filep) + continue; + fscanf(filep, "%d", &cpu); + fclose(filep); + + pkg = cpus[cpu].physical_package_id; + pni[pkg].count++; + + if (node < pni[pkg].min) + pni[pkg].min = node; } + for (pkg = 0; pkg < topo.num_packages; pkg++) + if (pni[pkg].count > topo.nodes_per_pkg) + topo.nodes_per_pkg = pni[0].count; + + for (cpu = 0; cpu < topo.num_cpus; cpu++) { + pkg = cpus[cpu].physical_package_id; + node = cpus[cpu].physical_node_id; + cpus[cpu].logical_node_id = node - pni[pkg].min; + } + free(pni); + +} + +int get_physical_node_id(struct cpu_topology *thiscpu) +{ + char path[80]; + FILE *filep; + int i; + int cpu = thiscpu->logical_cpu_id; + + for (i = 0; i <= topo.max_cpu_num; i++) { + sprintf(path, "/sys/devices/system/cpu/cpu%d/node%i/cpulist", + cpu, i); + filep = fopen(path, "r"); + if (!filep) + continue; + fclose(filep); + return i; + } + return -1; +} + +int get_thread_siblings(struct cpu_topology *thiscpu) +{ + char path[80], character; + FILE *filep; + unsigned long map; + int so, shift, sib_core; + int cpu = thiscpu->logical_cpu_id; + int offset = topo.max_cpu_num + 1; + size_t size; + int thread_id = 0; + + thiscpu->put_ids = CPU_ALLOC((topo.max_cpu_num + 1)); + if (thiscpu->thread_id < 0) + thiscpu->thread_id = thread_id++; + if (!thiscpu->put_ids) + return -1; + + size = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); + CPU_ZERO_S(size, thiscpu->put_ids); + + sprintf(path, + "/sys/devices/system/cpu/cpu%d/topology/thread_siblings", cpu); + filep = fopen_or_die(path, "r"); + do { + 
offset -= BITMASK_SIZE; + fscanf(filep, "%lx%c", &map, &character); + for (shift = 0; shift < BITMASK_SIZE; shift++) { + if ((map >> shift) & 0x1) { + so = shift + offset; + sib_core = get_core_id(so); + if (sib_core == thiscpu->physical_core_id) { + CPU_SET_S(so, size, thiscpu->put_ids); + if ((so != cpu) && + (cpus[so].thread_id < 0)) + cpus[so].thread_id = + thread_id++; + } + } + } + } while (!strncmp(&character, ",", 1)); fclose(filep); - return matches+1; + + return CPU_COUNT_S(size, thiscpu->put_ids); } /* @@ -2339,32 +2517,42 @@ int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *, struct thread_data *thread_base2, struct core_data *core_base2, struct pkg_data *pkg_base2) { - int retval, pkg_no, core_no, thread_no; + int retval, pkg_no, node_no, core_no, thread_no; for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) { - for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) { - for (thread_no = 0; thread_no < - topo.num_threads_per_core; ++thread_no) { - struct thread_data *t, *t2; - struct core_data *c, *c2; - struct pkg_data *p, *p2; - - t = GET_THREAD(thread_base, thread_no, core_no, pkg_no); - - if (cpu_is_not_present(t->cpu_id)) - continue; - - t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no); - - c = GET_CORE(core_base, core_no, pkg_no); - c2 = GET_CORE(core_base2, core_no, pkg_no); - - p = GET_PKG(pkg_base, pkg_no); - p2 = GET_PKG(pkg_base2, pkg_no); - - retval = func(t, c, p, t2, c2, p2); - if (retval) - return retval; + for (node_no = 0; node_no < topo.nodes_per_pkg; ++node_no) { + for (core_no = 0; core_no < topo.cores_per_node; + ++core_no) { + for (thread_no = 0; thread_no < + topo.threads_per_core; ++thread_no) { + struct thread_data *t, *t2; + struct core_data *c, *c2; + struct pkg_data *p, *p2; + + t = GET_THREAD(thread_base, thread_no, + core_no, node_no, + pkg_no); + + if (cpu_is_not_present(t->cpu_id)) + continue; + + t2 = GET_THREAD(thread_base2, thread_no, + core_no, node_no, + pkg_no); + + c = GET_CORE(core_base, core_no, + node_no, pkg_no); + c2 = GET_CORE(core_base2, core_no, + node_no, + pkg_no); + + p = GET_PKG(pkg_base, pkg_no); + p2 = GET_PKG(pkg_base2, pkg_no); + + retval = func(t, c, p, t2, c2, p2); + if (retval) + return retval; + } } } } @@ -2409,6 +2597,20 @@ void re_initialize(void) printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus); } +void set_max_cpu_num(void) +{ + FILE *filep; + unsigned long dummy; + + topo.max_cpu_num = 0; + filep = fopen_or_die( + "/sys/devices/system/cpu/cpu0/topology/thread_siblings", + "r"); + while (fscanf(filep, "%lx,", &dummy) == 1) + topo.max_cpu_num += BITMASK_SIZE; + fclose(filep); + topo.max_cpu_num--; /* 0 based */ +} /* * count_cpus() @@ -2416,10 +2618,7 @@ void re_initialize(void) */ int count_cpus(int cpu) { - if (topo.max_cpu_num < cpu) - topo.max_cpu_num = cpu; - - topo.num_cpus += 1; + topo.num_cpus++; return 0; } int mark_cpu_present(int cpu) @@ -2428,6 +2627,12 @@ int mark_cpu_present(int cpu) return 0; } +int init_thread_id(int cpu) +{ + cpus[cpu].thread_id = -1; + return 0; +} + /* * snapshot_proc_interrupts() * @@ -2542,6 +2747,52 @@ int snapshot_gfx_mhz(void) } /* + * snapshot_cpu_lpi() + * + * record snapshot of + * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us + * + * return 1 if config change requires a restart, else return 0 + */ +int snapshot_cpu_lpi_us(void) +{ + FILE *fp; + int retval; + + fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r"); + + retval = fscanf(fp, "%lld", 
&cpuidle_cur_cpu_lpi_us); + if (retval != 1) + err(1, "CPU LPI"); + + fclose(fp); + + return 0; +} +/* + * snapshot_sys_lpi() + * + * record snapshot of + * /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us + * + * return 1 if config change requires a restart, else return 0 + */ +int snapshot_sys_lpi_us(void) +{ + FILE *fp; + int retval; + + fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r"); + + retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us); + if (retval != 1) + err(1, "SYS LPI"); + + fclose(fp); + + return 0; +} +/* * snapshot /proc and /sys files * * return 1 if configuration restart needed, else return 0 @@ -2558,13 +2809,83 @@ int snapshot_proc_sysfs_files(void) if (DO_BIC(BIC_GFXMHz)) snapshot_gfx_mhz(); + if (DO_BIC(BIC_CPU_LPI)) + snapshot_cpu_lpi_us(); + + if (DO_BIC(BIC_SYS_LPI)) + snapshot_sys_lpi_us(); + return 0; } +int exit_requested; + +static void signal_handler (int signal) +{ + switch (signal) { + case SIGINT: + exit_requested = 1; + if (debug) + fprintf(stderr, " SIGINT\n"); + break; + case SIGUSR1: + if (debug > 1) + fprintf(stderr, "SIGUSR1\n"); + break; + } + /* make sure this manually-invoked interval is at least 1ms long */ + nanosleep(&one_msec, NULL); +} + +void setup_signal_handler(void) +{ + struct sigaction sa; + + memset(&sa, 0, sizeof(sa)); + + sa.sa_handler = &signal_handler; + + if (sigaction(SIGINT, &sa, NULL) < 0) + err(1, "sigaction SIGINT"); + if (sigaction(SIGUSR1, &sa, NULL) < 0) + err(1, "sigaction SIGUSR1"); +} + +void do_sleep(void) +{ + struct timeval select_timeout; + fd_set readfds; + int retval; + + FD_ZERO(&readfds); + FD_SET(0, &readfds); + + if (!isatty(fileno(stdin))) { + nanosleep(&interval_ts, NULL); + return; + } + + select_timeout = interval_tv; + retval = select(1, &readfds, NULL, NULL, &select_timeout); + + if (retval == 1) { + switch (getc(stdin)) { + case 'q': + exit_requested = 1; + break; + } + /* make sure this manually-invoked interval is at least 1ms long */ + nanosleep(&one_msec, NULL); + } +} + void turbostat_loop() { int retval; int restarted = 0; + int done_iters = 0; + + setup_signal_handler(); restart: restarted++; @@ -2581,6 +2902,7 @@ restart: goto restart; } restarted = 0; + done_iters = 0; gettimeofday(&tv_even, (struct timezone *)NULL); while (1) { @@ -2588,7 +2910,7 @@ restart: re_initialize(); goto restart; } - nanosleep(&interval_ts, NULL); + do_sleep(); if (snapshot_proc_sysfs_files()) goto restart; retval = for_all_cpus(get_counters, ODD_COUNTERS); @@ -2607,7 +2929,11 @@ restart: compute_average(EVEN_COUNTERS); format_all_counters(EVEN_COUNTERS); flush_output_stdout(); - nanosleep(&interval_ts, NULL); + if (exit_requested) + break; + if (num_iterations && ++done_iters >= num_iterations) + break; + do_sleep(); if (snapshot_proc_sysfs_files()) goto restart; retval = for_all_cpus(get_counters, EVEN_COUNTERS); @@ -2626,6 +2952,10 @@ restart: compute_average(ODD_COUNTERS); format_all_counters(ODD_COUNTERS); flush_output_stdout(); + if (exit_requested) + break; + if (num_iterations && ++done_iters >= num_iterations) + break; } } @@ -2740,6 +3070,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ + case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ pkg_cstate_limits = hsw_pkg_cstate_limits; has_misc_feature_control = 1; break; @@ -2945,6 +3276,7 @@ int has_config_tdp(unsigned int family, unsigned int model) case 
INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ + case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */ @@ -3399,6 +3731,7 @@ void rapl_probe(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ + case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); @@ -3523,6 +3856,12 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model) } } +void automatic_cstate_conversion_probe(unsigned int family, unsigned int model) +{ + if (is_skx(family, model) || is_bdx(family, model)) + has_automatic_cstate_conversion = 1; +} + int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; @@ -3728,6 +4067,7 @@ int has_snb_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ + case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GEMINI_LAKE: @@ -3761,6 +4101,7 @@ int has_hsw_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ + case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */ case INTEL_FAM6_ATOM_GEMINI_LAKE: return 1; @@ -3786,6 +4127,7 @@ int has_skl_msrs(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ + case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ return 1; } return 0; @@ -3815,6 +4157,19 @@ int is_knl(unsigned int family, unsigned int model) return 0; } +int is_cnl(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + + switch (model) { + case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ + return 1; + } + + return 0; +} + unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) { if (is_knl(family, model)) @@ -3947,7 +4302,7 @@ void decode_misc_enable_msr(void) base_cpu, msr, msr & MSR_IA32_MISC_ENABLE_TM1 ? "" : "No-", msr & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP ? "" : "No-", - msr & MSR_IA32_MISC_ENABLE_MWAIT ? "No-" : "", + msr & MSR_IA32_MISC_ENABLE_MWAIT ? "" : "No-", msr & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE ? "No-" : "", msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ? 
"No-" : ""); } @@ -4152,7 +4507,6 @@ void process_cpuid() case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ crystal_hz = 24000000; /* 24.0 MHz */ break; - case INTEL_FAM6_SKYLAKE_X: /* SKX */ case INTEL_FAM6_ATOM_DENVERTON: /* DNV */ crystal_hz = 25000000; /* 25.0 MHz */ break; @@ -4253,6 +4607,7 @@ void process_cpuid() } do_slm_cstates = is_slm(family, model); do_knl_cstates = is_knl(family, model); + do_cnl_cstates = is_cnl(family, model); if (!quiet) decode_misc_pwr_mgmt_msr(); @@ -4262,6 +4617,7 @@ void process_cpuid() rapl_probe(family, model); perf_limit_reasons_probe(family, model); + automatic_cstate_conversion_probe(family, model); if (!quiet) dump_cstate_pstate_config_info(family, model); @@ -4280,6 +4636,16 @@ void process_cpuid() if (!access("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", R_OK)) BIC_PRESENT(BIC_GFXMHz); + if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", R_OK)) + BIC_PRESENT(BIC_CPU_LPI); + else + BIC_NOT_PRESENT(BIC_CPU_LPI); + + if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", R_OK)) + BIC_PRESENT(BIC_SYS_LPI); + else + BIC_NOT_PRESENT(BIC_SYS_LPI); + if (!quiet) decode_misc_feature_control(); @@ -4310,14 +4676,10 @@ void topology_probe() int max_core_id = 0; int max_package_id = 0; int max_siblings = 0; - struct cpu_topology { - int core_id; - int physical_package_id; - } *cpus; /* Initialize num_cpus, max_cpu_num */ + set_max_cpu_num(); topo.num_cpus = 0; - topo.max_cpu_num = 0; for_all_proc_cpus(count_cpus); if (!summary_only && topo.num_cpus > 1) BIC_PRESENT(BIC_CPU); @@ -4357,6 +4719,7 @@ void topology_probe() cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1)); CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); + for_all_proc_cpus(init_thread_id); /* * For online cpus @@ -4370,26 +4733,45 @@ void topology_probe() fprintf(outf, "cpu%d NOT PRESENT\n", i); continue; } - cpus[i].core_id = get_core_id(i); - if (cpus[i].core_id > max_core_id) - max_core_id = cpus[i].core_id; + cpus[i].logical_cpu_id = i; + + /* get package information */ cpus[i].physical_package_id = get_physical_package_id(i); if (cpus[i].physical_package_id > max_package_id) max_package_id = cpus[i].physical_package_id; - siblings = get_num_ht_siblings(i); + /* get numa node information */ + cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); + if (cpus[i].physical_node_id > topo.max_node_num) + topo.max_node_num = cpus[i].physical_node_id; + + /* get core information */ + cpus[i].physical_core_id = get_core_id(i); + if (cpus[i].physical_core_id > max_core_id) + max_core_id = cpus[i].physical_core_id; + + /* get thread information */ + siblings = get_thread_siblings(&cpus[i]); if (siblings > max_siblings) max_siblings = siblings; + if (cpus[i].thread_id != -1) + topo.num_cores++; + if (debug > 1) - fprintf(outf, "cpu %d pkg %d core %d\n", - i, cpus[i].physical_package_id, cpus[i].core_id); + fprintf(outf, + "cpu %d pkg %d node %d core %d thread %d\n", + i, cpus[i].physical_package_id, + cpus[i].physical_node_id, + cpus[i].physical_core_id, + cpus[i].thread_id); } - topo.num_cores_per_pkg = max_core_id + 1; + + topo.cores_per_node = max_core_id + 1; if (debug > 1) fprintf(outf, "max_core_id %d, sizing for %d cores per package\n", - max_core_id, topo.num_cores_per_pkg); - if (!summary_only && topo.num_cores_per_pkg > 1) + max_core_id, topo.cores_per_node); + if (!summary_only && topo.cores_per_node > 1) BIC_PRESENT(BIC_Core); topo.num_packages = max_package_id + 1; @@ -4399,33 +4781,38 @@ void 
topology_probe() if (!summary_only && topo.num_packages > 1) BIC_PRESENT(BIC_Package); - topo.num_threads_per_core = max_siblings; + set_node_data(); if (debug > 1) - fprintf(outf, "max_siblings %d\n", max_siblings); + fprintf(outf, "nodes_per_pkg %d\n", topo.nodes_per_pkg); + if (!summary_only && topo.nodes_per_pkg > 1) + BIC_PRESENT(BIC_Node); - free(cpus); + topo.threads_per_core = max_siblings; + if (debug > 1) + fprintf(outf, "max_siblings %d\n", max_siblings); } void -allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p) +allocate_counters(struct thread_data **t, struct core_data **c, + struct pkg_data **p) { int i; + int num_cores = topo.cores_per_node * topo.nodes_per_pkg * + topo.num_packages; + int num_threads = topo.threads_per_core * num_cores; - *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg * - topo.num_packages, sizeof(struct thread_data)); + *t = calloc(num_threads, sizeof(struct thread_data)); if (*t == NULL) goto error; - for (i = 0; i < topo.num_threads_per_core * - topo.num_cores_per_pkg * topo.num_packages; i++) + for (i = 0; i < num_threads; i++) (*t)[i].cpu_id = -1; - *c = calloc(topo.num_cores_per_pkg * topo.num_packages, - sizeof(struct core_data)); + *c = calloc(num_cores, sizeof(struct core_data)); if (*c == NULL) goto error; - for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++) + for (i = 0; i < num_cores; i++) (*c)[i].core_id = -1; *p = calloc(topo.num_packages, sizeof(struct pkg_data)); @@ -4442,47 +4829,39 @@ error: /* * init_counter() * - * set cpu_id, core_num, pkg_num * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE - * - * increment topo.num_cores when 1st core in pkg seen */ void init_counter(struct thread_data *thread_base, struct core_data *core_base, - struct pkg_data *pkg_base, int thread_num, int core_num, - int pkg_num, int cpu_id) + struct pkg_data *pkg_base, int cpu_id) { + int pkg_id = cpus[cpu_id].physical_package_id; + int node_id = cpus[cpu_id].logical_node_id; + int core_id = cpus[cpu_id].physical_core_id; + int thread_id = cpus[cpu_id].thread_id; struct thread_data *t; struct core_data *c; struct pkg_data *p; - t = GET_THREAD(thread_base, thread_num, core_num, pkg_num); - c = GET_CORE(core_base, core_num, pkg_num); - p = GET_PKG(pkg_base, pkg_num); + t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id); + c = GET_CORE(core_base, core_id, node_id, pkg_id); + p = GET_PKG(pkg_base, pkg_id); t->cpu_id = cpu_id; - if (thread_num == 0) { + if (thread_id == 0) { t->flags |= CPU_IS_FIRST_THREAD_IN_CORE; if (cpu_is_first_core_in_package(cpu_id)) t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE; } - c->core_id = core_num; - p->package_id = pkg_num; + c->core_id = core_id; + p->package_id = pkg_id; } int initialize_counters(int cpu_id) { - int my_thread_id, my_core_id, my_package_id; - - my_package_id = get_physical_package_id(cpu_id); - my_core_id = get_core_id(cpu_id); - my_thread_id = get_cpu_position_in_core(cpu_id); - if (!my_thread_id) - topo.num_cores++; - - init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); - init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); + init_counter(EVEN_COUNTERS, cpu_id); + init_counter(ODD_COUNTERS, cpu_id); return 0; } @@ -4630,7 +5009,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(outf, "turbostat version 17.06.23" + fprintf(outf, "turbostat version 18.06.01" " - Len Brown <lenb@kernel.org>\n"); } @@ -4661,7 +5040,7 @@ int add_counter(unsigned int msr_num, char *path, 
char *name, msrp->next = sys.tp; sys.tp = msrp; sys.added_thread_counters++; - if (sys.added_thread_counters > MAX_ADDED_COUNTERS) { + if (sys.added_thread_counters > MAX_ADDED_THREAD_COUNTERS) { fprintf(stderr, "exceeded max %d added thread counters\n", MAX_ADDED_COUNTERS); exit(-1); @@ -4820,7 +5199,7 @@ void probe_sysfs(void) if (!DO_BIC(BIC_sysfs)) return; - for (state = 10; state > 0; --state) { + for (state = 10; state >= 0; --state) { sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state); @@ -4847,7 +5226,7 @@ void probe_sysfs(void) FORMAT_PERCENT, SYSFS_PERCPU); } - for (state = 10; state > 0; --state) { + for (state = 10; state >= 0; --state) { sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state); @@ -4960,34 +5339,6 @@ error: exit(-1); } -int shown; -/* - * parse_show_hide() - process cmdline to set default counter action - */ -void parse_show_hide(char *optarg, enum show_hide_mode new_mode) -{ - /* - * --show: show only those specified - * The 1st invocation will clear and replace the enabled mask - * subsequent invocations can add to it. - */ - if (new_mode == SHOW_LIST) { - if (shown == 0) - bic_enabled = bic_lookup(optarg, new_mode); - else - bic_enabled |= bic_lookup(optarg, new_mode); - shown = 1; - - return; - } - - /* - * --hide: do not show those specified - * multiple invocations simply clear more bits in enabled mask - */ - bic_enabled &= ~bic_lookup(optarg, new_mode); - -} void cmdline(int argc, char **argv) { @@ -4998,7 +5349,9 @@ void cmdline(int argc, char **argv) {"cpu", required_argument, 0, 'c'}, {"Dump", no_argument, 0, 'D'}, {"debug", no_argument, 0, 'd'}, /* internal, not documented */ + {"enable", required_argument, 0, 'e'}, {"interval", required_argument, 0, 'i'}, + {"num_iterations", required_argument, 0, 'n'}, {"help", no_argument, 0, 'h'}, {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help {"Joules", no_argument, 0, 'J'}, @@ -5014,7 +5367,7 @@ void cmdline(int argc, char **argv) progname = argv[0]; - while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v", + while ((opt = getopt_long_only(argc, argv, "+C:c:Dde:hi:Jn:o:qST:v", long_options, &option_index)) != -1) { switch (opt) { case 'a': @@ -5026,11 +5379,20 @@ void cmdline(int argc, char **argv) case 'D': dump_only++; break; + case 'e': + /* --enable specified counter */ + bic_enabled |= bic_lookup(optarg, SHOW_LIST); + break; case 'd': debug++; + ENABLE_BIC(BIC_DISABLED_BY_DEFAULT); break; case 'H': - parse_show_hide(optarg, HIDE_LIST); + /* + * --hide: do not show those specified + * multiple invocations simply clear more bits in enabled mask + */ + bic_enabled &= ~bic_lookup(optarg, HIDE_LIST); break; case 'h': default: @@ -5046,7 +5408,8 @@ void cmdline(int argc, char **argv) exit(2); } - interval_ts.tv_sec = interval; + interval_tv.tv_sec = interval_ts.tv_sec = interval; + interval_tv.tv_usec = (interval - interval_tv.tv_sec) * 1000000; interval_ts.tv_nsec = (interval - interval_ts.tv_sec) * 1000000000; } break; @@ -5054,6 +5417,7 @@ void cmdline(int argc, char **argv) rapl_joules++; break; case 'l': + ENABLE_BIC(BIC_DISABLED_BY_DEFAULT); list_header_only++; quiet++; break; @@ -5063,8 +5427,26 @@ void cmdline(int argc, char **argv) case 'q': quiet = 1; break; + case 'n': + num_iterations = strtod(optarg, NULL); + + if (num_iterations <= 0) { + fprintf(outf, "iterations %d should be positive number\n", + num_iterations); + exit(2); + } + break; case 's': - parse_show_hide(optarg, SHOW_LIST); + /* + * --show: 
show only those specified + * The 1st invocation will clear and replace the enabled mask + * subsequent invocations can add to it. + */ + if (shown == 0) + bic_enabled = bic_lookup(optarg, SHOW_LIST); + else + bic_enabled |= bic_lookup(optarg, SHOW_LIST); + shown = 1; break; case 'S': summary_only++; diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile index 2447b1bbaacf..f4534fb8b951 100644 --- a/tools/power/x86/x86_energy_perf_policy/Makefile +++ b/tools/power/x86/x86_energy_perf_policy/Makefile @@ -24,5 +24,5 @@ install : x86_energy_perf_policy install -d $(DESTDIR)$(PREFIX)/bin install $(BUILD_OUTPUT)/x86_energy_perf_policy $(DESTDIR)$(PREFIX)/bin/x86_energy_perf_policy install -d $(DESTDIR)$(PREFIX)/share/man/man8 - install x86_energy_perf_policy.8 $(DESTDIR)$(PREFIX)/share/man/man8 + install -m 644 x86_energy_perf_policy.8 $(DESTDIR)$(PREFIX)/share/man/man8 diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index fa7ee369b3c9..db66f8a0d4be 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile @@ -17,7 +17,7 @@ ifeq ($(BUILD), 32) LDFLAGS += -m32 endif -targets: mapshift $(TARGETS) +targets: generated/map-shift.h $(TARGETS) main: $(OFILES) @@ -42,9 +42,7 @@ radix-tree.c: ../../../lib/radix-tree.c idr.c: ../../../lib/idr.c sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ -.PHONY: mapshift - -mapshift: +generated/map-shift.h: @if ! grep -qws $(SHIFT) generated/map-shift.h; then \ echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \ generated/map-shift.h; \ diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c index 6c645eb77d42..ee820fcc29b0 100644 --- a/tools/testing/radix-tree/idr-test.c +++ b/tools/testing/radix-tree/idr-test.c @@ -252,6 +252,13 @@ void idr_checks(void) idr_remove(&idr, 3); idr_remove(&idr, 0); + assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0); + idr_remove(&idr, 1); + for (i = 1; i < RADIX_TREE_MAP_SIZE; i++) + assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i); + idr_remove(&idr, 1 << 30); + idr_destroy(&idr); + for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { struct item *item = item_create(i, 0); assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 59245b3d587c..7bf405638b0b 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -16,6 +16,7 @@ #include <linux/radix-tree.h> #include <linux/slab.h> #include <linux/errno.h> +#include <pthread.h> #include "test.h" @@ -624,6 +625,67 @@ static void multiorder_account(void) item_kill_tree(&tree); } +bool stop_iteration = false; + +static void *creator_func(void *ptr) +{ + /* 'order' is set up to ensure we have sibling entries */ + unsigned int order = RADIX_TREE_MAP_SHIFT - 1; + struct radix_tree_root *tree = ptr; + int i; + + for (i = 0; i < 10000; i++) { + item_insert_order(tree, 0, order); + item_delete_rcu(tree, 0); + } + + stop_iteration = true; + return NULL; +} + +static void *iterator_func(void *ptr) +{ + struct radix_tree_root *tree = ptr; + struct radix_tree_iter iter; + struct item *item; + void **slot; + + while (!stop_iteration) { + rcu_read_lock(); + radix_tree_for_each_slot(slot, tree, &iter, 0) { + item = radix_tree_deref_slot(slot); + + if (!item) + continue; + if (radix_tree_deref_retry(item)) { + slot = radix_tree_iter_retry(&iter); + continue; + } + + 
item_sanity(item, iter.index); + } + rcu_read_unlock(); + } + return NULL; +} + +static void multiorder_iteration_race(void) +{ + const int num_threads = sysconf(_SC_NPROCESSORS_ONLN); + pthread_t worker_thread[num_threads]; + RADIX_TREE(tree, GFP_KERNEL); + int i; + + pthread_create(&worker_thread[0], NULL, &creator_func, &tree); + for (i = 1; i < num_threads; i++) + pthread_create(&worker_thread[i], NULL, &iterator_func, &tree); + + for (i = 0; i < num_threads; i++) + pthread_join(worker_thread[i], NULL); + + item_kill_tree(&tree); +} + void multiorder_checks(void) { int i; @@ -644,6 +706,7 @@ void multiorder_checks(void) multiorder_join(); multiorder_split(); multiorder_account(); + multiorder_iteration_race(); radix_tree_cpu_dead(0); } diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c index 5978ab1f403d..def6015570b2 100644 --- a/tools/testing/radix-tree/test.c +++ b/tools/testing/radix-tree/test.c @@ -75,6 +75,25 @@ int item_delete(struct radix_tree_root *root, unsigned long index) return 0; } +static void item_free_rcu(struct rcu_head *head) +{ + struct item *item = container_of(head, struct item, rcu_head); + + free(item); +} + +int item_delete_rcu(struct radix_tree_root *root, unsigned long index) +{ + struct item *item = radix_tree_delete(root, index); + + if (item) { + item_sanity(item, index); + call_rcu(&item->rcu_head, item_free_rcu); + return 1; + } + return 0; +} + void item_check_present(struct radix_tree_root *root, unsigned long index) { struct item *item; diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h index d9c031dbeb1a..31f1d9b6f506 100644 --- a/tools/testing/radix-tree/test.h +++ b/tools/testing/radix-tree/test.h @@ -5,6 +5,7 @@ #include <linux/rcupdate.h> struct item { + struct rcu_head rcu_head; unsigned long index; unsigned int order; }; @@ -12,9 +13,11 @@ struct item { struct item *item_create(unsigned long index, unsigned int order); int __item_insert(struct radix_tree_root *root, struct item *item); int item_insert(struct radix_tree_root *root, unsigned long index); +void item_sanity(struct item *item, unsigned long index); int item_insert_order(struct radix_tree_root *root, unsigned long index, unsigned order); int item_delete(struct radix_tree_root *root, unsigned long index); +int item_delete_rcu(struct radix_tree_root *root, unsigned long index); struct item *item_lookup(struct radix_tree_root *root, unsigned long index); void item_check_present(struct radix_tree_root *root, unsigned long index); diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 983dd25d49f4..1eefe211a4a8 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -5,3 +5,5 @@ CONFIG_BPF_EVENTS=y CONFIG_TEST_BPF=m CONFIG_CGROUP_BPF=y CONFIG_NETDEVSIM=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_SCH_INGRESS=y diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 3e7718b1a9ae..fd7de7eb329e 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -11713,6 +11713,11 @@ static void get_unpriv_disabled() FILE *fd; fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r"); + if (!fd) { + perror("fopen /proc/sys/"UNPRIV_SYSCTL); + unpriv_disabled = true; + return; + } if (fgets(buf, 2, fd) == buf && atoi(buf)) unpriv_disabled = true; fclose(fd); diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 2ddcc96ae456..d9d00319b07c 100644 --- 
a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -15,7 +15,7 @@ LIBKVM += $(LIBKVM_$(UNAME_M)) INSTALL_HDR_PATH = $(top_srcdir)/usr LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ -CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) +CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I.. # After inclusion, $(OUTPUT) is defined and # $(TEST_GEN_PROGS) starts with $(OUTPUT)/ diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index 7ab98e41324f..ac53730b30aa 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -19,6 +19,7 @@ #include <errno.h> #include <unistd.h> #include <fcntl.h> +#include "kselftest.h" ssize_t test_write(int fd, const void *buf, size_t count); ssize_t test_read(int fd, void *buf, size_t count); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 2cedfda181d4..37e2a787d2fc 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -50,8 +50,8 @@ int kvm_check_cap(long cap) int kvm_fd; kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", - KVM_DEV_PATH, kvm_fd, errno); + if (kvm_fd < 0) + exit(KSFT_SKIP); ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n" @@ -91,8 +91,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) vm->mode = mode; kvm_fd = open(KVM_DEV_PATH, perm); - TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", - KVM_DEV_PATH, kvm_fd, errno); + if (kvm_fd < 0) + exit(KSFT_SKIP); /* Create VM. 
*/ vm->fd = ioctl(kvm_fd, KVM_CREATE_VM, NULL); @@ -418,8 +418,8 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void) cpuid = allocate_kvm_cpuid2(); kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i", - KVM_DEV_PATH, kvm_fd, errno); + if (kvm_fd < 0) + exit(KSFT_SKIP); ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid); TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n", @@ -675,8 +675,8 @@ static int vcpu_mmap_sz(void) int dev_fd, ret; dev_fd = open(KVM_DEV_PATH, O_RDONLY); - TEST_ASSERT(dev_fd >= 0, "%s open %s failed, rc: %i errno: %i", - __func__, KVM_DEV_PATH, dev_fd, errno); + if (dev_fd < 0) + exit(KSFT_SKIP); ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); TEST_ASSERT(ret >= sizeof(struct kvm_run), diff --git a/tools/testing/selftests/kvm/sync_regs_test.c b/tools/testing/selftests/kvm/sync_regs_test.c index 428e9473f5e2..eae1ece3c31b 100644 --- a/tools/testing/selftests/kvm/sync_regs_test.c +++ b/tools/testing/selftests/kvm/sync_regs_test.c @@ -85,6 +85,9 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left, { } +#define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS) +#define INVALID_SYNC_FIELD 0x80000000 + int main(int argc, char *argv[]) { struct kvm_vm *vm; @@ -98,9 +101,14 @@ int main(int argc, char *argv[]) setbuf(stdout, NULL); cap = kvm_check_cap(KVM_CAP_SYNC_REGS); - TEST_ASSERT((unsigned long)cap == KVM_SYNC_X86_VALID_FIELDS, - "KVM_CAP_SYNC_REGS (0x%x) != KVM_SYNC_X86_VALID_FIELDS (0x%lx)\n", - cap, KVM_SYNC_X86_VALID_FIELDS); + if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) { + fprintf(stderr, "KVM_CAP_SYNC_REGS not supported, skipping test\n"); + exit(KSFT_SKIP); + } + if ((cap & INVALID_SYNC_FIELD) != 0) { + fprintf(stderr, "The \"invalid\" field is not invalid, skipping test\n"); + exit(KSFT_SKIP); + } /* Create VM */ vm = vm_create_default(VCPU_ID, guest_code); @@ -108,7 +116,14 @@ int main(int argc, char *argv[]) run = vcpu_state(vm, VCPU_ID); /* Request reading invalid register set from VCPU. */ - run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS << 1; + run->kvm_valid_regs = INVALID_SYNC_FIELD; + rv = _vcpu_run(vm, VCPU_ID); + TEST_ASSERT(rv < 0 && errno == EINVAL, + "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", + rv); + vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; + + run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; rv = _vcpu_run(vm, VCPU_ID); TEST_ASSERT(rv < 0 && errno == EINVAL, "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n", @@ -116,7 +131,14 @@ int main(int argc, char *argv[]) vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0; /* Request setting invalid register set into VCPU. */ - run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS << 1; + run->kvm_dirty_regs = INVALID_SYNC_FIELD; + rv = _vcpu_run(vm, VCPU_ID); + TEST_ASSERT(rv < 0 && errno == EINVAL, + "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", + rv); + vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0; + + run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS; rv = _vcpu_run(vm, VCPU_ID); TEST_ASSERT(rv < 0 && errno == EINVAL, "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n", @@ -125,7 +147,7 @@ int main(int argc, char *argv[]) /* Request and verify all valid register sets. 
*/ /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */ - run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS; + run->kvm_valid_regs = TEST_SYNC_FIELDS; rv = _vcpu_run(vm, VCPU_ID); TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "Unexpected exit reason: %u (%s),\n", @@ -146,7 +168,7 @@ int main(int argc, char *argv[]) run->s.regs.sregs.apic_base = 1 << 11; /* TODO run->s.regs.events.XYZ = ABC; */ - run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS; + run->kvm_valid_regs = TEST_SYNC_FIELDS; run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS; rv = _vcpu_run(vm, VCPU_ID); TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, @@ -172,7 +194,7 @@ int main(int argc, char *argv[]) /* Clear kvm_dirty_regs bits, verify new s.regs values are * overwritten with existing guest values. */ - run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS; + run->kvm_valid_regs = TEST_SYNC_FIELDS; run->kvm_dirty_regs = 0; run->s.regs.regs.r11 = 0xDEADBEEF; rv = _vcpu_run(vm, VCPU_ID); @@ -211,7 +233,7 @@ int main(int argc, char *argv[]) * with kvm_sync_regs values. */ run->kvm_valid_regs = 0; - run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS; + run->kvm_dirty_regs = TEST_SYNC_FIELDS; run->s.regs.regs.r11 = 0xBBBB; rv = _vcpu_run(vm, VCPU_ID); TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c index 8f7f62093add..aaa633263b2c 100644 --- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c @@ -189,8 +189,8 @@ int main(int argc, char *argv[]) struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); if (!(entry->ecx & CPUID_VMX)) { - printf("nested VMX not enabled, skipping test"); - return 0; + fprintf(stderr, "nested VMX not enabled, skipping test\n"); + exit(KSFT_SKIP); } vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code); diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config index 6a75a3ea44ad..7ba089b33e8b 100644 --- a/tools/testing/selftests/net/config +++ b/tools/testing/selftests/net/config @@ -7,3 +7,8 @@ CONFIG_NET_L3_MASTER_DEV=y CONFIG_IPV6=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_VETH=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_NET_IPVTI=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_IPV6_VTI=y +CONFIG_DUMMY=y diff --git a/tools/testing/selftests/net/reuseport_bpf_numa.c b/tools/testing/selftests/net/reuseport_bpf_numa.c index 365c32e84189..c9f478b40996 100644 --- a/tools/testing/selftests/net/reuseport_bpf_numa.c +++ b/tools/testing/selftests/net/reuseport_bpf_numa.c @@ -23,6 +23,8 @@ #include <unistd.h> #include <numa.h> +#include "../kselftest.h" + static const int PORT = 8888; static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto) @@ -229,7 +231,7 @@ int main(void) int *rcv_fd, nodes; if (numa_available() < 0) - error(1, errno, "no numa api support"); + ksft_exit_skip("no numa api support\n"); nodes = numa_max_node() + 1; diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 168c66d74fc5..e1473234968d 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -134,11 +134,15 @@ struct seccomp_data { #endif #ifndef SECCOMP_FILTER_FLAG_TSYNC -#define SECCOMP_FILTER_FLAG_TSYNC 1 +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) #endif #ifndef SECCOMP_FILTER_FLAG_LOG -#define SECCOMP_FILTER_FLAG_LOG 2 +#define SECCOMP_FILTER_FLAG_LOG (1UL << 1) +#endif + +#ifndef 
SECCOMP_FILTER_FLAG_SPEC_ALLOW +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) #endif #ifndef PTRACE_SECCOMP_GET_METADATA @@ -2072,14 +2076,26 @@ TEST(seccomp_syscall_mode_lock) TEST(detect_seccomp_filter_flags) { unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, - SECCOMP_FILTER_FLAG_LOG }; + SECCOMP_FILTER_FLAG_LOG, + SECCOMP_FILTER_FLAG_SPEC_ALLOW }; unsigned int flag, all_flags; int i; long ret; /* Test detection of known-good filter flags */ for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { + int bits = 0; + flag = flags[i]; + /* Make sure the flag is a single bit! */ + while (flag) { + if (flag & 0x1) + bits ++; + flag >>= 1; + } + ASSERT_EQ(1, bits); + flag = flags[i]; + ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index d744991c0f4f..39f66bc29b82 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -11,7 +11,7 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c) TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \ - protection_keys test_vdso test_vsyscall + protection_keys test_vdso test_vsyscall mov_ss_trap TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ test_FCMOV test_FCOMI test_FISTTP \ vdso_restorer diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c new file mode 100644 index 000000000000..3c3a022654f3 --- /dev/null +++ b/tools/testing/selftests/x86/mov_ss_trap.c @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mov_ss_trap.c: Exercise the bizarre side effects of a watchpoint on MOV SS + * + * This does MOV SS from a watchpointed address followed by various + * types of kernel entries. A MOV SS that hits a watchpoint will queue + * up a #DB trap but will not actually deliver that trap. The trap + * will be delivered after the next instruction instead. The CPU's logic + * seems to be: + * + * - Any fault: drop the pending #DB trap. + * - INT $N, INT3, INTO, SYSCALL, SYSENTER: enter the kernel and then + * deliver #DB. + * - ICEBP: enter the kernel but do not deliver the watchpoint trap + * - breakpoint: only one #DB is delivered (phew!) + * + * There are plenty of ways for a kernel to handle this incorrectly. This + * test tries to exercise all the cases. + * + * This should mostly cover CVE-2018-1087 and CVE-2018-8897. 
+ */ +#define _GNU_SOURCE + +#include <stdlib.h> +#include <sys/ptrace.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <sys/user.h> +#include <sys/syscall.h> +#include <unistd.h> +#include <errno.h> +#include <stddef.h> +#include <stdio.h> +#include <err.h> +#include <string.h> +#include <setjmp.h> +#include <sys/prctl.h> + +#define X86_EFLAGS_RF (1UL << 16) + +#if __x86_64__ +# define REG_IP REG_RIP +#else +# define REG_IP REG_EIP +#endif + +unsigned short ss; +extern unsigned char breakpoint_insn[]; +sigjmp_buf jmpbuf; +static unsigned char altstack_data[SIGSTKSZ]; + +static void enable_watchpoint(void) +{ + pid_t parent = getpid(); + int status; + + pid_t child = fork(); + if (child < 0) + err(1, "fork"); + + if (child) { + if (waitpid(child, &status, 0) != child) + err(1, "waitpid for child"); + } else { + unsigned long dr0, dr1, dr7; + + dr0 = (unsigned long)&ss; + dr1 = (unsigned long)breakpoint_insn; + dr7 = ((1UL << 1) | /* G0 */ + (3UL << 16) | /* RW0 = read or write */ + (1UL << 18) | /* LEN0 = 2 bytes */ + (1UL << 3)); /* G1, RW1 = insn */ + + if (ptrace(PTRACE_ATTACH, parent, NULL, NULL) != 0) + err(1, "PTRACE_ATTACH"); + + if (waitpid(parent, &status, 0) != parent) + err(1, "waitpid for child"); + + if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[0]), dr0) != 0) + err(1, "PTRACE_POKEUSER DR0"); + + if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[1]), dr1) != 0) + err(1, "PTRACE_POKEUSER DR1"); + + if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[7]), dr7) != 0) + err(1, "PTRACE_POKEUSER DR7"); + + printf("\tDR0 = %lx, DR1 = %lx, DR7 = %lx\n", dr0, dr1, dr7); + + if (ptrace(PTRACE_DETACH, parent, NULL, NULL) != 0) + err(1, "PTRACE_DETACH"); + + exit(0); + } +} + +static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), + int flags) +{ + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + sa.sa_sigaction = handler; + sa.sa_flags = SA_SIGINFO | flags; + sigemptyset(&sa.sa_mask); + if (sigaction(sig, &sa, 0)) + err(1, "sigaction"); +} + +static char const * const signames[] = { + [SIGSEGV] = "SIGSEGV", + [SIGBUS] = "SIBGUS", + [SIGTRAP] = "SIGTRAP", + [SIGILL] = "SIGILL", +}; + +static void sigtrap(int sig, siginfo_t *si, void *ctx_void) +{ + ucontext_t *ctx = ctx_void; + + printf("\tGot SIGTRAP with RIP=%lx, EFLAGS.RF=%d\n", + (unsigned long)ctx->uc_mcontext.gregs[REG_IP], + !!(ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_RF)); +} + +static void handle_and_return(int sig, siginfo_t *si, void *ctx_void) +{ + ucontext_t *ctx = ctx_void; + + printf("\tGot %s with RIP=%lx\n", signames[sig], + (unsigned long)ctx->uc_mcontext.gregs[REG_IP]); +} + +static void handle_and_longjmp(int sig, siginfo_t *si, void *ctx_void) +{ + ucontext_t *ctx = ctx_void; + + printf("\tGot %s with RIP=%lx\n", signames[sig], + (unsigned long)ctx->uc_mcontext.gregs[REG_IP]); + + siglongjmp(jmpbuf, 1); +} + +int main() +{ + unsigned long nr; + + asm volatile ("mov %%ss, %[ss]" : [ss] "=m" (ss)); + printf("\tSS = 0x%hx, &SS = 0x%p\n", ss, &ss); + + if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == 0) + printf("\tPR_SET_PTRACER_ANY succeeded\n"); + + printf("\tSet up a watchpoint\n"); + sethandler(SIGTRAP, sigtrap, 0); + enable_watchpoint(); + + printf("[RUN]\tRead from watched memory (should get SIGTRAP)\n"); + asm volatile ("mov %[ss], %[tmp]" : [tmp] "=r" (nr) : [ss] "m" (ss)); + + printf("[RUN]\tMOV SS; INT3\n"); + asm volatile ("mov %[ss], %%ss; int3" :: [ss] "m" (ss)); + + 
printf("[RUN]\tMOV SS; INT 3\n"); + asm volatile ("mov %[ss], %%ss; .byte 0xcd, 0x3" :: [ss] "m" (ss)); + + printf("[RUN]\tMOV SS; CS CS INT3\n"); + asm volatile ("mov %[ss], %%ss; .byte 0x2e, 0x2e; int3" :: [ss] "m" (ss)); + + printf("[RUN]\tMOV SS; CSx14 INT3\n"); + asm volatile ("mov %[ss], %%ss; .fill 14,1,0x2e; int3" :: [ss] "m" (ss)); + + printf("[RUN]\tMOV SS; INT 4\n"); + sethandler(SIGSEGV, handle_and_return, SA_RESETHAND); + asm volatile ("mov %[ss], %%ss; int $4" :: [ss] "m" (ss)); + +#ifdef __i386__ + printf("[RUN]\tMOV SS; INTO\n"); + sethandler(SIGSEGV, handle_and_return, SA_RESETHAND); + nr = -1; + asm volatile ("add $1, %[tmp]; mov %[ss], %%ss; into" + : [tmp] "+r" (nr) : [ss] "m" (ss)); +#endif + + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; ICEBP\n"); + + /* Some emulators (e.g. QEMU TCG) don't emulate ICEBP. */ + sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND); + + asm volatile ("mov %[ss], %%ss; .byte 0xf1" :: [ss] "m" (ss)); + } + + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; CLI\n"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); + asm volatile ("mov %[ss], %%ss; cli" :: [ss] "m" (ss)); + } + + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; #PF\n"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); + asm volatile ("mov %[ss], %%ss; mov (-1), %[tmp]" + : [tmp] "=r" (nr) : [ss] "m" (ss)); + } + + /* + * INT $1: if #DB has DPL=3 and there isn't special handling, + * then the kernel will die. + */ + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; INT 1\n"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); + asm volatile ("mov %[ss], %%ss; int $1" :: [ss] "m" (ss)); + } + +#ifdef __x86_64__ + /* + * In principle, we should test 32-bit SYSCALL as well, but + * the calling convention is so unpredictable that it's + * not obviously worth the effort. + */ + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; SYSCALL\n"); + sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND); + nr = SYS_getpid; + /* + * Toggle the high bit of RSP to make it noncanonical to + * strengthen this test on non-SMAP systems. + */ + asm volatile ("btc $63, %%rsp\n\t" + "mov %[ss], %%ss; syscall\n\t" + "btc $63, %%rsp" + : "+a" (nr) : [ss] "m" (ss) + : "rcx" +#ifdef __x86_64__ + , "r11" +#endif + ); + } +#endif + + printf("[RUN]\tMOV SS; breakpointed NOP\n"); + asm volatile ("mov %[ss], %%ss; breakpoint_insn: nop" :: [ss] "m" (ss)); + + /* + * Invoking SYSENTER directly breaks all the rules. Just handle + * the SIGSEGV. + */ + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; SYSENTER\n"); + stack_t stack = { + .ss_sp = altstack_data, + .ss_size = SIGSTKSZ, + }; + if (sigaltstack(&stack, NULL) != 0) + err(1, "sigaltstack"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK); + nr = SYS_getpid; + asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr) + : [ss] "m" (ss) : "flags", "rcx" +#ifdef __x86_64__ + , "r11" +#endif + ); + + /* We're unreachable here. SYSENTER forgets RIP. 
*/ + } + + if (sigsetjmp(jmpbuf, 1) == 0) { + printf("[RUN]\tMOV SS; INT $0x80\n"); + sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND); + nr = 20; /* compat getpid */ + asm volatile ("mov %[ss], %%ss; int $0x80" + : "+a" (nr) : [ss] "m" (ss) + : "flags" +#ifdef __x86_64__ + , "r8", "r9", "r10", "r11" +#endif + ); + } + + printf("[OK]\tI aten't dead\n"); + return 0; +} diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c index 9c0325e1ea68..50f7e9272481 100644 --- a/tools/testing/selftests/x86/mpx-mini-test.c +++ b/tools/testing/selftests/x86/mpx-mini-test.c @@ -368,6 +368,11 @@ static int expected_bnd_index = -1; uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */ unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS]; +/* Failed address bound checks: */ +#ifndef SEGV_BNDERR +# define SEGV_BNDERR 3 +#endif + /* * The kernel is supposed to provide some information about the bounds * exception in the siginfo. It should match what we have in the bounds @@ -419,8 +424,6 @@ void handler(int signum, siginfo_t *si, void *vucontext) br_count++; dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count); -#define SEGV_BNDERR 3 /* failed address bound checks */ - dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n", status, ip, br_reason); dprintf2("si_signo: %d\n", si->si_signo); diff --git a/tools/testing/selftests/x86/pkey-helpers.h b/tools/testing/selftests/x86/pkey-helpers.h index b3cb7670e026..254e5436bdd9 100644 --- a/tools/testing/selftests/x86/pkey-helpers.h +++ b/tools/testing/selftests/x86/pkey-helpers.h @@ -26,30 +26,26 @@ static inline void sigsafe_printf(const char *format, ...) { va_list ap; - va_start(ap, format); if (!dprint_in_signal) { + va_start(ap, format); vprintf(format, ap); + va_end(ap); } else { int ret; - int len = vsnprintf(dprint_in_signal_buffer, - DPRINT_IN_SIGNAL_BUF_SIZE, - format, ap); /* - * len is amount that would have been printed, - * but actual write is truncated at BUF_SIZE. + * No printf() functions are signal-safe. + * They deadlock easily. Write the format + * string to get some output, even if + * incomplete. */ - if (len > DPRINT_IN_SIGNAL_BUF_SIZE) - len = DPRINT_IN_SIGNAL_BUF_SIZE; - ret = write(1, dprint_in_signal_buffer, len); + ret = write(1, format, strlen(format)); if (ret < 0) - abort(); + exit(1); } - va_end(ap); } #define dprintf_level(level, args...) do { \ if (level <= DEBUG_LEVEL) \ sigsafe_printf(args); \ - fflush(NULL); \ } while (0) #define dprintf0(args...) dprintf_level(0, args) #define dprintf1(args...) 
dprintf_level(1, args) diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c index f15aa5a76fe3..460b4bdf4c1e 100644 --- a/tools/testing/selftests/x86/protection_keys.c +++ b/tools/testing/selftests/x86/protection_keys.c @@ -72,10 +72,9 @@ extern void abort_hooks(void); test_nr, iteration_nr); \ dprintf0("errno at assert: %d", errno); \ abort_hooks(); \ - assert(condition); \ + exit(__LINE__); \ } \ } while (0) -#define raw_assert(cond) assert(cond) void cat_into_file(char *str, char *file) { @@ -87,12 +86,17 @@ void cat_into_file(char *str, char *file) * these need to be raw because they are called under * pkey_assert() */ - raw_assert(fd >= 0); + if (fd < 0) { + fprintf(stderr, "error opening '%s'\n", str); + perror("error: "); + exit(__LINE__); + } + ret = write(fd, str, strlen(str)); if (ret != strlen(str)) { perror("write to file failed"); fprintf(stderr, "filename: '%s' str: '%s'\n", file, str); - raw_assert(0); + exit(__LINE__); } close(fd); } @@ -191,26 +195,30 @@ void lots_o_noops_around_write(int *write_to_me) #ifdef __i386__ #ifndef SYS_mprotect_key -# define SYS_mprotect_key 380 +# define SYS_mprotect_key 380 #endif + #ifndef SYS_pkey_alloc -# define SYS_pkey_alloc 381 -# define SYS_pkey_free 382 +# define SYS_pkey_alloc 381 +# define SYS_pkey_free 382 #endif -#define REG_IP_IDX REG_EIP -#define si_pkey_offset 0x14 + +#define REG_IP_IDX REG_EIP +#define si_pkey_offset 0x14 #else #ifndef SYS_mprotect_key -# define SYS_mprotect_key 329 +# define SYS_mprotect_key 329 #endif + #ifndef SYS_pkey_alloc -# define SYS_pkey_alloc 330 -# define SYS_pkey_free 331 +# define SYS_pkey_alloc 330 +# define SYS_pkey_free 331 #endif -#define REG_IP_IDX REG_RIP -#define si_pkey_offset 0x20 + +#define REG_IP_IDX REG_RIP +#define si_pkey_offset 0x20 #endif @@ -225,8 +233,14 @@ void dump_mem(void *dumpme, int len_bytes) } } -#define SEGV_BNDERR 3 /* failed address bound checks */ -#define SEGV_PKUERR 4 +/* Failed address bound checks: */ +#ifndef SEGV_BNDERR +# define SEGV_BNDERR 3 +#endif + +#ifndef SEGV_PKUERR +# define SEGV_PKUERR 4 +#endif static char *si_code_str(int si_code) { @@ -289,13 +303,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) dump_mem(pkru_ptr - 128, 256); pkey_assert(*pkru_ptr); - si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset); - dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr); - dump_mem(si_pkey_ptr - 8, 24); - siginfo_pkey = *si_pkey_ptr; - pkey_assert(siginfo_pkey < NR_PKEYS); - last_si_pkey = siginfo_pkey; - if ((si->si_code == SEGV_MAPERR) || (si->si_code == SEGV_ACCERR) || (si->si_code == SEGV_BNDERR)) { @@ -303,6 +310,13 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) exit(4); } + si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset); + dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr); + dump_mem((u8 *)si_pkey_ptr - 8, 24); + siginfo_pkey = *si_pkey_ptr; + pkey_assert(siginfo_pkey < NR_PKEYS); + last_si_pkey = siginfo_pkey; + dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr); /* need __rdpkru() version so we do not do shadow_pkru checking */ dprintf1("signal pkru from pkru: %08x\n", __rdpkru()); @@ -311,22 +325,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n"); pkru_faults++; dprintf1("<<<<==================================================\n"); - return; - if (trapno == 14) { - fprintf(stderr, - "ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n", - trapno, ip); - fprintf(stderr, 
"si_addr %p\n", si->si_addr); - fprintf(stderr, "REG_ERR: %lx\n", - (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]); - exit(1); - } else { - fprintf(stderr, "unexpected trap %d! at 0x%lx\n", trapno, ip); - fprintf(stderr, "si_addr %p\n", si->si_addr); - fprintf(stderr, "REG_ERR: %lx\n", - (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]); - exit(2); - } dprint_in_signal = 0; } @@ -393,10 +391,15 @@ pid_t fork_lazy_child(void) return forkret; } -#define PKEY_DISABLE_ACCESS 0x1 -#define PKEY_DISABLE_WRITE 0x2 +#ifndef PKEY_DISABLE_ACCESS +# define PKEY_DISABLE_ACCESS 0x1 +#endif + +#ifndef PKEY_DISABLE_WRITE +# define PKEY_DISABLE_WRITE 0x2 +#endif -u32 pkey_get(int pkey, unsigned long flags) +static u32 hw_pkey_get(int pkey, unsigned long flags) { u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE); u32 pkru = __rdpkru(); @@ -418,7 +421,7 @@ u32 pkey_get(int pkey, unsigned long flags) return masked_pkru; } -int pkey_set(int pkey, unsigned long rights, unsigned long flags) +static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags) { u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE); u32 old_pkru = __rdpkru(); @@ -452,15 +455,15 @@ void pkey_disable_set(int pkey, int flags) pkey, flags); pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)); - pkey_rights = pkey_get(pkey, syscall_flags); + pkey_rights = hw_pkey_get(pkey, syscall_flags); - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, pkey, pkey, pkey_rights); pkey_assert(pkey_rights >= 0); pkey_rights |= flags; - ret = pkey_set(pkey, pkey_rights, syscall_flags); + ret = hw_pkey_set(pkey, pkey_rights, syscall_flags); assert(!ret); /*pkru and flags have the same format */ shadow_pkru |= flags << (pkey * 2); @@ -468,8 +471,8 @@ void pkey_disable_set(int pkey, int flags) pkey_assert(ret >= 0); - pkey_rights = pkey_get(pkey, syscall_flags); - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, + pkey_rights = hw_pkey_get(pkey, syscall_flags); + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, pkey, pkey, pkey_rights); dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru()); @@ -483,24 +486,24 @@ void pkey_disable_clear(int pkey, int flags) { unsigned long syscall_flags = 0; int ret; - int pkey_rights = pkey_get(pkey, syscall_flags); + int pkey_rights = hw_pkey_get(pkey, syscall_flags); u32 orig_pkru = rdpkru(); pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)); - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, pkey, pkey, pkey_rights); pkey_assert(pkey_rights >= 0); pkey_rights |= flags; - ret = pkey_set(pkey, pkey_rights, 0); + ret = hw_pkey_set(pkey, pkey_rights, 0); /* pkru and flags have the same format */ shadow_pkru &= ~(flags << (pkey * 2)); pkey_assert(ret >= 0); - pkey_rights = pkey_get(pkey, syscall_flags); - dprintf1("%s(%d) pkey_get(%d): %x\n", __func__, + pkey_rights = hw_pkey_get(pkey, syscall_flags); + dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__, pkey, pkey, pkey_rights); dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru()); @@ -674,10 +677,12 @@ int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot, struct pkey_malloc_record { void *ptr; long size; + int prot; }; struct pkey_malloc_record *pkey_malloc_records; +struct pkey_malloc_record *pkey_last_malloc_record; long nr_pkey_malloc_records; -void record_pkey_malloc(void *ptr, long size) +void record_pkey_malloc(void *ptr, long size, int prot) { long i; struct pkey_malloc_record *rec = NULL; @@ -709,6 +714,8 
@@ void record_pkey_malloc(void *ptr, long size) (int)(rec - pkey_malloc_records), rec, ptr, size); rec->ptr = ptr; rec->size = size; + rec->prot = prot; + pkey_last_malloc_record = rec; nr_pkey_malloc_records++; } @@ -753,7 +760,7 @@ void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey) pkey_assert(ptr != (void *)-1); ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey); pkey_assert(!ret); - record_pkey_malloc(ptr, size); + record_pkey_malloc(ptr, size, prot); rdpkru(); dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr); @@ -774,7 +781,7 @@ void *malloc_pkey_anon_huge(long size, int prot, u16 pkey) size = ALIGN_UP(size, HPAGE_SIZE * 2); ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); pkey_assert(ptr != (void *)-1); - record_pkey_malloc(ptr, size); + record_pkey_malloc(ptr, size, prot); mprotect_pkey(ptr, size, prot, pkey); dprintf1("unaligned ptr: %p\n", ptr); @@ -847,7 +854,7 @@ void *malloc_pkey_hugetlb(long size, int prot, u16 pkey) pkey_assert(ptr != (void *)-1); mprotect_pkey(ptr, size, prot, pkey); - record_pkey_malloc(ptr, size); + record_pkey_malloc(ptr, size, prot); dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr); return ptr; @@ -869,7 +876,7 @@ void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey) mprotect_pkey(ptr, size, prot, pkey); - record_pkey_malloc(ptr, size); + record_pkey_malloc(ptr, size, prot); dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr); close(fd); @@ -918,13 +925,21 @@ void *malloc_pkey(long size, int prot, u16 pkey) } int last_pkru_faults; +#define UNKNOWN_PKEY -2 void expected_pk_fault(int pkey) { dprintf2("%s(): last_pkru_faults: %d pkru_faults: %d\n", __func__, last_pkru_faults, pkru_faults); dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey); pkey_assert(last_pkru_faults + 1 == pkru_faults); - pkey_assert(last_si_pkey == pkey); + + /* + * For exec-only memory, we do not know the pkey in + * advance, so skip this check. + */ + if (pkey != UNKNOWN_PKEY) + pkey_assert(last_si_pkey == pkey); + /* * The signal handler shold have cleared out PKRU to let the * test program continue. We now have to restore it. @@ -939,10 +954,11 @@ void expected_pk_fault(int pkey) last_si_pkey = -1; } -void do_not_expect_pk_fault(void) -{ - pkey_assert(last_pkru_faults == pkru_faults); -} +#define do_not_expect_pk_fault(msg) do { \ + if (last_pkru_faults != pkru_faults) \ + dprintf0("unexpected PK fault: %s\n", msg); \ + pkey_assert(last_pkru_faults == pkru_faults); \ +} while (0) int test_fds[10] = { -1 }; int nr_test_fds; @@ -1151,12 +1167,15 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) pkey_assert(i < NR_PKEYS*2); /* - * There are 16 pkeys supported in hardware. One is taken - * up for the default (0) and another can be taken up by - * an execute-only mapping. Ensure that we can allocate - * at least 14 (16-2). + * There are 16 pkeys supported in hardware. Three are + * allocated by the time we get here: + * 1. The default key (0) + * 2. One possibly consumed by an execute-only mapping. + * 3. One allocated by the test code and passed in via + * 'pkey' to this function. + * Ensure that we can allocate at least another 13 (16-3). */ - pkey_assert(i >= NR_PKEYS-2); + pkey_assert(i >= NR_PKEYS-3); for (i = 0; i < nr_allocated_pkeys; i++) { err = sys_pkey_free(allocated_pkeys[i]); @@ -1165,6 +1184,35 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) } } +/* + * pkey 0 is special. It is allocated by default, so you do not + * have to call pkey_alloc() to use it first. 
Make sure that it + * is usable. + */ +void test_mprotect_with_pkey_0(int *ptr, u16 pkey) +{ + long size; + int prot; + + assert(pkey_last_malloc_record); + size = pkey_last_malloc_record->size; + /* + * This is a bit of a hack. But mprotect() requires + * huge-page-aligned sizes when operating on hugetlbfs. + * So, make sure that we use something that's a multiple + * of a huge page when we can. + */ + if (size >= HPAGE_SIZE) + size = HPAGE_SIZE; + prot = pkey_last_malloc_record->prot; + + /* Use pkey 0 */ + mprotect_pkey(ptr, size, prot, 0); + + /* Make sure that we can set it back to the original pkey. */ + mprotect_pkey(ptr, size, prot, pkey); +} + void test_ptrace_of_child(int *ptr, u16 pkey) { __attribute__((__unused__)) int peek_result; @@ -1228,7 +1276,7 @@ void test_ptrace_of_child(int *ptr, u16 pkey) pkey_assert(ret != -1); /* Now access from the current task, and expect NO exception: */ peek_result = read_ptr(plain_ptr); - do_not_expect_pk_fault(); + do_not_expect_pk_fault("read plain pointer after ptrace"); ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0); pkey_assert(ret != -1); @@ -1241,12 +1289,9 @@ void test_ptrace_of_child(int *ptr, u16 pkey) free(plain_ptr_unaligned); } -void test_executing_on_unreadable_memory(int *ptr, u16 pkey) +void *get_pointer_to_instructions(void) { void *p1; - int scratch; - int ptr_contents; - int ret; p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE); dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write); @@ -1256,7 +1301,23 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey) /* Point 'p1' at the *second* page of the function: */ p1 += PAGE_SIZE; + /* + * Try to ensure we fault this in on next touch to ensure + * we get an instruction fault as opposed to a data one + */ madvise(p1, PAGE_SIZE, MADV_DONTNEED); + + return p1; +} + +void test_executing_on_unreadable_memory(int *ptr, u16 pkey) +{ + void *p1; + int scratch; + int ptr_contents; + int ret; + + p1 = get_pointer_to_instructions(); lots_o_noops_around_write(&scratch); ptr_contents = read_ptr(p1); dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); @@ -1272,12 +1333,55 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey) */ madvise(p1, PAGE_SIZE, MADV_DONTNEED); lots_o_noops_around_write(&scratch); - do_not_expect_pk_fault(); + do_not_expect_pk_fault("executing on PROT_EXEC memory"); ptr_contents = read_ptr(p1); dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); expected_pk_fault(pkey); } +void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey) +{ + void *p1; + int scratch; + int ptr_contents; + int ret; + + dprintf1("%s() start\n", __func__); + + p1 = get_pointer_to_instructions(); + lots_o_noops_around_write(&scratch); + ptr_contents = read_ptr(p1); + dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); + + /* Use a *normal* mprotect(), not mprotect_pkey(): */ + ret = mprotect(p1, PAGE_SIZE, PROT_EXEC); + pkey_assert(!ret); + + dprintf2("pkru: %x\n", rdpkru()); + + /* Make sure this is an *instruction* fault */ + madvise(p1, PAGE_SIZE, MADV_DONTNEED); + lots_o_noops_around_write(&scratch); + do_not_expect_pk_fault("executing on PROT_EXEC memory"); + ptr_contents = read_ptr(p1); + dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents); + expected_pk_fault(UNKNOWN_PKEY); + + /* + * Put the memory back to non-PROT_EXEC. Should clear the + * exec-only pkey off the VMA and allow it to be readable + * again. 
Go to PROT_NONE first to check for a kernel bug + * that did not clear the pkey when doing PROT_NONE. + */ + ret = mprotect(p1, PAGE_SIZE, PROT_NONE); + pkey_assert(!ret); + + ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC); + pkey_assert(!ret); + ptr_contents = read_ptr(p1); + do_not_expect_pk_fault("plain read on recently PROT_EXEC area"); +} + void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey) { int size = PAGE_SIZE; @@ -1302,6 +1406,8 @@ void (*pkey_tests[])(int *ptr, u16 pkey) = { test_kernel_gup_of_access_disabled_region, test_kernel_gup_write_to_write_disabled_region, test_executing_on_unreadable_memory, + test_implicit_mprotect_exec_only_memory, + test_mprotect_with_pkey_0, test_ptrace_of_child, test_pkey_syscalls_on_non_allocated_pkey, test_pkey_syscalls_bad_args, |
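The exec-only behavior that test_implicit_mprotect_exec_only_memory() exercises can be observed outside the selftest as well. Below is a minimal standalone sketch, not part of the patch, and only meaningful on x86 with protection keys and a kernel that backs PROT_EXEC mappings with an execute-only pkey: it mprotect()s an ordinary page to PROT_EXEC and then reads from it. With an execute-only pkey in effect the read faults with si_code SEGV_PKUERR; without pkey support it simply succeeds, since x86 page tables cannot express execute-without-read on their own. The SEGV_PKUERR fallback define mirrors the #ifndef pattern added by the patch above.

/* exec_only_sketch.c: observe pkey-backed execute-only PROT_EXEC mappings. */
#include <err.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef SEGV_PKUERR
# define SEGV_PKUERR 4
#endif

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	/* Use write(), not printf(): printf() is not async-signal-safe. */
	const char *msg = (si->si_code == SEGV_PKUERR) ?
		"read faulted with SEGV_PKUERR: execute-only pkey in effect\n" :
		"read faulted, but not with SEGV_PKUERR\n";

	(void)sig;
	(void)ctx;
	write(STDOUT_FILENO, msg, strlen(msg));
	_exit(0);
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct sigaction sa;
	volatile char *p;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	if (sigaction(SIGSEGV, &sa, NULL))
		err(1, "sigaction");

	p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	p[0] = 1;

	/* A *plain* mprotect(), not pkey_mprotect(): the kernel may still
	 * attach its execute-only protection key to the mapping. */
	if (mprotect((void *)p, page, PROT_EXEC))
		err(1, "mprotect");

	/* If we get here without a fault, no execute-only pkey was used. */
	printf("read from PROT_EXEC page succeeded (%d): no execute-only pkey\n",
	       p[0]);
	return 0;
}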