Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  |  21
1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f2c2fc011e6b..9ef412666df1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1799,42 +1799,33 @@ int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
-/*
- * This should not be using sched_clock(). A real patch is in progress
- * to fix this up, until that is in place we need to disable preemption
- * around sched_clock() in this function and set_io_start_time_ns().
- */
static inline void set_start_time_ns(struct request *req)
{
- preempt_disable();
- req->cgroup_start_time_ns = sched_clock();
- preempt_enable();
+ req->cgroup_start_time_ns = ktime_get_ns();
}
static inline void set_io_start_time_ns(struct request *req)
{
- preempt_disable();
- req->cgroup_io_start_time_ns = sched_clock();
- preempt_enable();
+ req->cgroup_io_start_time_ns = ktime_get_ns();
}
-static inline uint64_t rq_start_time_ns(struct request *req)
+static inline u64 rq_start_time_ns(struct request *req)
{
return req->cgroup_start_time_ns;
}
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+static inline u64 rq_io_start_time_ns(struct request *req)
{
return req->cgroup_io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
-static inline uint64_t rq_start_time_ns(struct request *req)
+static inline u64 rq_start_time_ns(struct request *req)
{
return 0;
}
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+static inline u64 rq_io_start_time_ns(struct request *req)
{
return 0;
}
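
For context, a minimal usage sketch follows. It is not part of this patch: the function name example_account_request() and the idea of feeding the deltas into per-cgroup statistics are assumptions made purely for illustration. It shows why the conversion simplifies callers: both helpers now return ktime_get_ns() samples, i.e. monotonic nanoseconds that are comparable across CPUs, so elapsed times reduce to plain subtraction and the old preempt_disable()/preempt_enable() guard around sched_clock() is no longer needed.

#include <linux/blkdev.h>
#include <linux/ktime.h>

/*
 * Illustration only -- hypothetical caller, not part of this patch.
 * With CONFIG_BLK_CGROUP=n both helpers return 0, so the deltas
 * below are only meaningful when block cgroup support is enabled.
 */
static void example_account_request(struct request *req)
{
	u64 now = ktime_get_ns();
	u64 queue_ns = rq_io_start_time_ns(req) - rq_start_time_ns(req);
	u64 service_ns = now - rq_io_start_time_ns(req);

	/* a real caller would feed queue_ns / service_ns into cgroup stats */
	(void)queue_ns;
	(void)service_ns;
}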