path: root/block/blktrace.c
author     Ingo Molnar <mingo@elte.hu>            2008-01-11 13:35:54 +0100
committer  Jens Axboe <jens.axboe@oracle.com>     2008-01-11 13:35:54 +0100
commit     2997c8c4a0b179e8b834a7f30ba4323f2c60ccf4 (patch)
tree       319fa1c24c380544233890d6ff480a181bf80e96 /block/blktrace.c
parent     a24eab1ed506f3e0bcbcd3f619558935549d4ace (diff)
block: fix blktrace timestamps
David Dillow reported broken blktrace timestamps. The reason is cpu_clock(), which is not a global time source. Fix blktrace timestamps by using ktime_get(), like the networking code does for packet timestamps. This also removes a whole lot of complexity from blktrace.c and shrinks the code by 500 bytes:

   text    data     bss     dec     hex filename
   2888     124      44    3056     bf0 blktrace.o.before
   2390     116      44    2550     9f6 blktrace.o.after

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
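In short, the patch replaces the per-CPU cpu_clock() offset machinery with a single ktime_get() call, which is monotonic and consistent across CPUs. A minimal sketch of the new timestamping, for illustration only (the blk_trace_stamp() helper name is hypothetical; the struct blk_io_trace field matches the one set in the diff below):

    #include <linux/ktime.h>
    #include <linux/blktrace_api.h>

    /*
     * ktime_get() is a single, system-wide monotonic clock, so every CPU
     * stamps trace records against the same time base and the per-CPU
     * offset calibration removed by this patch is no longer needed.
     */
    static inline void blk_trace_stamp(struct blk_io_trace *t)
    {
            t->time = ktime_to_ns(ktime_get());
    }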
Diffstat (limited to 'block/blktrace.c')
-rw-r--r--  block/blktrace.c  69
1 files changed, 2 insertions, 67 deletions
diff --git a/block/blktrace.c b/block/blktrace.c
index 498a0a54a6aa..7471621d4ded 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -25,7 +25,6 @@
#include <linux/time.h>
#include <asm/uaccess.h>
-static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
static unsigned int blktrace_seq __read_mostly = 1;
/*
@@ -41,7 +40,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
const int cpu = smp_processor_id();
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
- t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
+ t->time = ktime_to_ns(ktime_get());
t->device = bt->dev;
t->action = action;
t->pid = pid;
@@ -159,7 +158,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
t->sequence = ++(*sequence);
- t->time = cpu_clock(cpu) - per_cpu(blk_trace_cpu_offset, cpu);
+ t->time = ktime_to_ns(ktime_get());
t->sector = sector;
t->bytes = bytes;
t->action = what;
@@ -506,73 +505,9 @@ void blk_trace_shutdown(struct request_queue *q)
}
}
-/*
- * Average offset over two calls to cpu_clock() with a gettimeofday()
- * in the middle
- */
-static void blk_check_time(unsigned long long *t, int this_cpu)
-{
- unsigned long long a, b;
- struct timeval tv;
-
- a = cpu_clock(this_cpu);
- do_gettimeofday(&tv);
- b = cpu_clock(this_cpu);
-
- *t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
- *t -= (a + b) / 2;
-}
-
-/*
- * calibrate our inter-CPU timings
- */
-static void blk_trace_check_cpu_time(void *data)
-{
- unsigned long long *t;
- int this_cpu = get_cpu();
-
- t = &per_cpu(blk_trace_cpu_offset, this_cpu);
-
- /*
- * Just call it twice, hopefully the second call will be cache hot
- * and a little more precise
- */
- blk_check_time(t, this_cpu);
- blk_check_time(t, this_cpu);
-
- put_cpu();
-}
-
-static void blk_trace_set_ht_offsets(void)
-{
-#if defined(CONFIG_SCHED_SMT)
- int cpu, i;
-
- /*
- * now make sure HT siblings have the same time offset
- */
- preempt_disable();
- for_each_online_cpu(cpu) {
- unsigned long long *cpu_off, *sibling_off;
-
- for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) {
- if (i == cpu)
- continue;
-
- cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
- sibling_off = &per_cpu(blk_trace_cpu_offset, i);
- *sibling_off = *cpu_off;
- }
- }
- preempt_enable();
-#endif
-}
-
static __init int blk_trace_init(void)
{
mutex_init(&blk_tree_mutex);
- on_each_cpu(blk_trace_check_cpu_time, NULL, 1, 1);
- blk_trace_set_ht_offsets();
return 0;
}