Diffstat (limited to 'tools/perf/util/cpumap.c')
-rw-r--r--  tools/perf/util/cpumap.c  |  87
1 file changed, 79 insertions(+), 8 deletions(-)
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 2c0b52264a46..37b3bb79ee08 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -3,12 +3,16 @@
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
+#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"
+#include "sane_ctype.h"
+
static int max_cpu_num;
+static int max_present_cpu_num;
static int max_node_num;
static int *cpunode_map;
@@ -28,7 +32,7 @@ static struct cpu_map *cpu_map__default_new(void)
cpus->map[i] = i;
cpus->nr = nr_cpus;
- atomic_set(&cpus->refcnt, 1);
+ refcount_set(&cpus->refcnt, 1);
}
return cpus;
@@ -42,7 +46,7 @@ static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
if (cpus != NULL) {
cpus->nr = nr_cpus;
memcpy(cpus->map, tmp_cpus, payload_size);
- atomic_set(&cpus->refcnt, 1);
+ refcount_set(&cpus->refcnt, 1);
}
return cpus;
@@ -251,7 +255,7 @@ struct cpu_map *cpu_map__dummy_new(void)
if (cpus != NULL) {
cpus->nr = 1;
cpus->map[0] = -1;
- atomic_set(&cpus->refcnt, 1);
+ refcount_set(&cpus->refcnt, 1);
}
return cpus;
@@ -268,7 +272,7 @@ struct cpu_map *cpu_map__empty_new(int nr)
for (i = 0; i < nr; i++)
cpus->map[i] = -1;
- atomic_set(&cpus->refcnt, 1);
+ refcount_set(&cpus->refcnt, 1);
}
return cpus;
@@ -277,7 +281,7 @@ struct cpu_map *cpu_map__empty_new(int nr)
static void cpu_map__delete(struct cpu_map *map)
{
if (map) {
- WARN_ONCE(atomic_read(&map->refcnt) != 0,
+ WARN_ONCE(refcount_read(&map->refcnt) != 0,
"cpu_map refcnt unbalanced\n");
free(map);
}
@@ -286,13 +290,13 @@ static void cpu_map__delete(struct cpu_map *map)
struct cpu_map *cpu_map__get(struct cpu_map *map)
{
if (map)
- atomic_inc(&map->refcnt);
+ refcount_inc(&map->refcnt);
return map;
}
void cpu_map__put(struct cpu_map *map)
{
- if (map && atomic_dec_and_test(&map->refcnt))
+ if (map && refcount_dec_and_test(&map->refcnt))
cpu_map__delete(map);
}
@@ -356,7 +360,7 @@ int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
/* ensure we process id in increasing order */
qsort(c->map, c->nr, sizeof(int), cmp_ids);
- atomic_set(&c->refcnt, 1);
+ refcount_set(&c->refcnt, 1);
*res = c;
return 0;
}
@@ -442,6 +446,7 @@ static void set_max_cpu_num(void)
/* set up default */
max_cpu_num = 4096;
+ max_present_cpu_num = 4096;
mnt = sysfs__mountpoint();
if (!mnt)
@@ -455,6 +460,17 @@ static void set_max_cpu_num(void)
}
ret = get_max_num(path, &max_cpu_num);
+ if (ret)
+ goto out;
+
+ /* get the highest present cpu number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
+ if (ret == PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+
+ ret = get_max_num(path, &max_present_cpu_num);
out:
if (ret)
@@ -505,6 +521,15 @@ int cpu__max_cpu(void)
return max_cpu_num;
}
+int cpu__max_present_cpu(void)
+{
+ if (unlikely(!max_present_cpu_num))
+ set_max_cpu_num();
+
+ return max_present_cpu_num;
+}
+
+
int cpu__get_node(int cpu)
{
if (unlikely(cpunode_map == NULL)) {
@@ -651,3 +676,49 @@ size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
pr_debug("cpumask list: %s\n", buf);
return ret;
}
+
+static char hex_char(unsigned char val)
+{
+ if (val < 10)
+ return val + '0';
+ if (val < 16)
+ return val - 10 + 'a';
+ return '?';
+}
+
+size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
+{
+ int i, cpu;
+ char *ptr = buf;
+ unsigned char *bitmap;
+ int last_cpu = cpu_map__cpu(map, map->nr - 1);
+
+ bitmap = zalloc((last_cpu + 7) / 8);
+ if (bitmap == NULL) {
+ buf[0] = '\0';
+ return 0;
+ }
+
+ for (i = 0; i < map->nr; i++) {
+ cpu = cpu_map__cpu(map, i);
+ bitmap[cpu / 8] |= 1 << (cpu % 8);
+ }
+
+ for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
+ unsigned char bits = bitmap[cpu / 8];
+
+ if (cpu % 8)
+ bits >>= 4;
+ else
+ bits &= 0xf;
+
+ *ptr++ = hex_char(bits);
+ if ((cpu % 32) == 0 && cpu > 0)
+ *ptr++ = ',';
+ }
+ *ptr = '\0';
+ free(bitmap);
+
+ buf[size - 1] = '\0';
+ return ptr - buf;
+}
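The new cpu_map__snprint_mask() renders a cpu_map as a kernel-style hex CPU mask: it marks each CPU in a temporary bitmap, then walks the bitmap one nibble at a time from the highest CPU down, emitting one hex digit per nibble and a ',' between 32-bit words. The standalone sketch below is not part of the patch; it only mirrors that formatting logic so the output can be checked outside the perf tree. The snprint_mask()/main() names and the sample CPU list are made up for the demo, and the bitmap here is sized as last_cpu / 8 + 1 bytes so a last CPU id that is a multiple of 8 also fits.

/*
 * Standalone sketch of the mask formatting done by cpu_map__snprint_mask().
 * Build with: cc -o mask_demo mask_demo.c
 */
#include <stdio.h>
#include <stdlib.h>

static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}

/* Format the sorted CPU ids in cpus[0..nr-1] as a comma-grouped hex mask. */
static size_t snprint_mask(const int *cpus, int nr, char *buf, size_t size)
{
	char *ptr = buf;
	unsigned char *bitmap;
	int i, cpu, last_cpu = cpus[nr - 1];

	/* one bit per possible CPU id 0..last_cpu */
	bitmap = calloc(1, last_cpu / 8 + 1);
	if (!bitmap) {
		buf[0] = '\0';
		return 0;
	}

	for (i = 0; i < nr; i++) {
		cpu = cpus[i];
		bitmap[cpu / 8] |= 1 << (cpu % 8);
	}

	/* walk nibbles from the one holding last_cpu down to nibble 0 */
	for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;	/* high nibble of this byte */
		else
			bits &= 0xf;	/* low nibble of this byte */

		*ptr++ = hex_char(bits);
		/* separate 32-bit words with ',' (not after the last one) */
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}

int main(void)
{
	int cpus[] = { 0, 1, 2, 3, 32, 33 };
	char buf[64];

	snprint_mask(cpus, 6, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints "3,0000000f" */
	return 0;
}

For the sample list 0-3,32-33 the sketch prints "3,0000000f", i.e. the same comma-every-32-bits grouping the kernel uses for cpumask files in sysfs, which is what the patch needs when writing masks for perf.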