Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/clockchips.h           | 154
-rw-r--r--   include/linux/clocksource.h          |  25
-rw-r--r--   include/linux/rtc.h                  |   1
-rw-r--r--   include/linux/tick.h                 | 190
-rw-r--r--   include/linux/timekeeper_internal.h  |  16
-rw-r--r--   include/linux/timekeeping.h          |  18
6 files changed, 186 insertions, 218 deletions
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 2e4cb67f6e56..96c280b2c263 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -8,33 +8,19 @@
 #ifndef _LINUX_CLOCKCHIPS_H
 #define _LINUX_CLOCKCHIPS_H
 
-/* Clock event notification values */
-enum clock_event_nofitiers {
-	CLOCK_EVT_NOTIFY_ADD,
-	CLOCK_EVT_NOTIFY_BROADCAST_ON,
-	CLOCK_EVT_NOTIFY_BROADCAST_OFF,
-	CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
-	CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
-	CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
-	CLOCK_EVT_NOTIFY_SUSPEND,
-	CLOCK_EVT_NOTIFY_RESUME,
-	CLOCK_EVT_NOTIFY_CPU_DYING,
-	CLOCK_EVT_NOTIFY_CPU_DEAD,
-};
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
 
-#include <linux/clocksource.h>
-#include <linux/cpumask.h>
-#include <linux/ktime.h>
-#include <linux/notifier.h>
+# include <linux/clocksource.h>
+# include <linux/cpumask.h>
+# include <linux/ktime.h>
+# include <linux/notifier.h>
 
 struct clock_event_device;
 struct module;
 
-/* Clock event mode commands */
+/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
 enum clock_event_mode {
-	CLOCK_EVT_MODE_UNUSED = 0,
+	CLOCK_EVT_MODE_UNUSED,
 	CLOCK_EVT_MODE_SHUTDOWN,
 	CLOCK_EVT_MODE_PERIODIC,
 	CLOCK_EVT_MODE_ONESHOT,
@@ -42,30 +28,49 @@ enum clock_event_mode {
 };
 
 /*
+ * Possible states of a clock event device.
+ *
+ * DETACHED:	Device is not used by clockevents core. Initial state or can be
+ *		reached from SHUTDOWN.
+ * SHUTDOWN:	Device is powered-off. Can be reached from PERIODIC or ONESHOT.
+ * PERIODIC:	Device is programmed to generate events periodically. Can be
+ *		reached from DETACHED or SHUTDOWN.
+ * ONESHOT:	Device is programmed to generate event only once. Can be reached
+ *		from DETACHED or SHUTDOWN.
+ */
+enum clock_event_state {
+	CLOCK_EVT_STATE_DETACHED,
+	CLOCK_EVT_STATE_SHUTDOWN,
+	CLOCK_EVT_STATE_PERIODIC,
+	CLOCK_EVT_STATE_ONESHOT,
+};
+
+/*
  * Clock event features
  */
-#define CLOCK_EVT_FEAT_PERIODIC	0x000001
-#define CLOCK_EVT_FEAT_ONESHOT	0x000002
-#define CLOCK_EVT_FEAT_KTIME	0x000004
+# define CLOCK_EVT_FEAT_PERIODIC	0x000001
+# define CLOCK_EVT_FEAT_ONESHOT		0x000002
+# define CLOCK_EVT_FEAT_KTIME		0x000004
+
 /*
- * x86(64) specific misfeatures:
+ * x86(64) specific (mis)features:
  *
  * - Clockevent source stops in C3 State and needs broadcast support.
  * - Local APIC timer is used as a dummy device.
  */
-#define CLOCK_EVT_FEAT_C3STOP	0x000008
-#define CLOCK_EVT_FEAT_DUMMY	0x000010
+# define CLOCK_EVT_FEAT_C3STOP		0x000008
+# define CLOCK_EVT_FEAT_DUMMY		0x000010
 
 /*
  * Core shall set the interrupt affinity dynamically in broadcast mode
  */
-#define CLOCK_EVT_FEAT_DYNIRQ	0x000020
-#define CLOCK_EVT_FEAT_PERCPU	0x000040
+# define CLOCK_EVT_FEAT_DYNIRQ		0x000020
+# define CLOCK_EVT_FEAT_PERCPU		0x000040
 
 /*
  * Clockevent device is based on a hrtimer for broadcast
  */
-#define CLOCK_EVT_FEAT_HRTIMER	0x000080
+# define CLOCK_EVT_FEAT_HRTIMER		0x000080
 
 /**
  * struct clock_event_device - clock event device descriptor
@@ -78,10 +83,15 @@ enum clock_event_mode {
  * @min_delta_ns:	minimum delta value in ns
  * @mult:		nanosecond to cycles multiplier
  * @shift:		nanoseconds to cycles divisor (power of two)
- * @mode:		operating mode assigned by the management code
+ * @mode:		operating mode, relevant only to ->set_mode(), OBSOLETE
+ * @state:		current state of the device, assigned by the core code
  * @features:		features
  * @retries:		number of forced programming retries
- * @set_mode:		set mode function
+ * @set_mode:		legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
+ * @set_state_periodic:	switch state to periodic, if !set_mode
+ * @set_state_oneshot:	switch state to oneshot, if !set_mode
+ * @set_state_shutdown:	switch state to shutdown, if !set_mode
+ * @tick_resume:	resume clkevt device, if !set_mode
  * @broadcast:		function to broadcast events
  * @min_delta_ticks:	minimum delta value in ticks stored for reconfiguration
  * @max_delta_ticks:	maximum delta value in ticks stored for reconfiguration
@@ -95,22 +105,31 @@ enum clock_event_mode {
  */
 struct clock_event_device {
 	void			(*event_handler)(struct clock_event_device *);
-	int			(*set_next_event)(unsigned long evt,
-						  struct clock_event_device *);
-	int			(*set_next_ktime)(ktime_t expires,
-						  struct clock_event_device *);
+	int			(*set_next_event)(unsigned long evt, struct clock_event_device *);
+	int			(*set_next_ktime)(ktime_t expires, struct clock_event_device *);
 	ktime_t			next_event;
 	u64			max_delta_ns;
 	u64			min_delta_ns;
 	u32			mult;
 	u32			shift;
 	enum clock_event_mode	mode;
+	enum clock_event_state	state;
 	unsigned int		features;
 	unsigned long		retries;
 
+	/*
+	 * State transition callback(s): Only one of the two groups should be
+	 * defined:
+	 * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
+	 * - set_state_{shutdown|periodic|oneshot}(), tick_resume().
+	 */
+	void		(*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
+	int		(*set_state_periodic)(struct clock_event_device *);
+	int		(*set_state_oneshot)(struct clock_event_device *);
+	int		(*set_state_shutdown)(struct clock_event_device *);
+	int		(*tick_resume)(struct clock_event_device *);
+
 	void		(*broadcast)(const struct cpumask *mask);
-	void		(*set_mode)(enum clock_event_mode mode,
-				    struct clock_event_device *);
 	void		(*suspend)(struct clock_event_device *);
 	void		(*resume)(struct clock_event_device *);
 	unsigned long		min_delta_ticks;
@@ -136,18 +155,18 @@ struct clock_event_device {
  *
  * factor = (clock_ticks << shift) / nanoseconds
  */
-static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec,
-				   int shift)
+static inline unsigned long
+div_sc(unsigned long ticks, unsigned long nsec, int shift)
 {
-	uint64_t tmp = ((uint64_t)ticks) << shift;
+	u64 tmp = ((u64)ticks) << shift;
 
 	do_div(tmp, nsec);
+
 	return (unsigned long) tmp;
 }
 
 /* Clock event layer functions */
-extern u64 clockevent_delta2ns(unsigned long latch,
-			       struct clock_event_device *evt);
+extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt);
 extern void clockevents_register_device(struct clock_event_device *dev);
 extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
 
@@ -158,57 +177,42 @@ extern void clockevents_config_and_register(struct clock_event_device *dev,
 
 extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
 
-extern void clockevents_exchange_device(struct clock_event_device *old,
-					struct clock_event_device *new);
-extern void clockevents_set_mode(struct clock_event_device *dev,
-				 enum clock_event_mode mode);
-extern int clockevents_program_event(struct clock_event_device *dev,
-				     ktime_t expires, bool force);
-
-extern void clockevents_handle_noop(struct clock_event_device *dev);
-
 static inline void clockevents_calc_mult_shift(struct clock_event_device *ce,
 					       u32 freq, u32 minsec)
 {
-	return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC,
-				      freq, minsec);
+	return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
 }
 
 extern void clockevents_suspend(void);
 extern void clockevents_resume(void);
 
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-#ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
+# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+#  ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
 extern void tick_broadcast(const struct cpumask *mask);
-#else
-#define tick_broadcast NULL
-#endif
+#  else
+#   define tick_broadcast NULL
+#  endif
 extern int tick_receive_broadcast(void);
-#endif
+# endif
 
-#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+# if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
 extern void tick_setup_hrtimer_broadcast(void);
 extern int tick_check_broadcast_expired(void);
-#else
+# else
 static inline int tick_check_broadcast_expired(void) { return 0; }
-static inline void tick_setup_hrtimer_broadcast(void) {};
-#endif
+static inline void tick_setup_hrtimer_broadcast(void) { }
+# endif
 
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern int clockevents_notify(unsigned long reason, void *arg);
-#else
-static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
-#endif
-
-#else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
-static inline void clockevents_suspend(void) {}
-static inline void clockevents_resume(void) {}
+#else /* !CONFIG_GENERIC_CLOCKEVENTS: */
+static inline void clockevents_suspend(void) { }
+static inline void clockevents_resume(void) { }
 static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
 static inline int tick_check_broadcast_expired(void) { return 0; }
-static inline void tick_setup_hrtimer_broadcast(void) {};
+static inline void tick_setup_hrtimer_broadcast(void) { }
 
-#endif
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
-#endif
+#endif /* _LINUX_CLOCKCHIPS_H */
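For drivers, the visible change in clockchips.h is that the multiplexed ->set_mode() callback is now legacy and the core drives an explicit state machine through set_state_shutdown()/set_state_periodic()/set_state_oneshot() and tick_resume(). The sketch below is illustrative only and is not part of this merge: a hypothetical "foo_timer" driver (invented names, invented frequency and delta limits) wired up with the new per-state callbacks and registered through clockevents_config_and_register().

/*
 * Illustrative sketch only: hypothetical driver using the new
 * set_state_*() callbacks instead of the obsolete ->set_mode().
 */
#include <linux/clockchips.h>
#include <linux/init.h>

static int foo_timer_shutdown(struct clock_event_device *evt)
{
	/* Stop the hardware comparator; the core tracks evt->state. */
	return 0;
}

static int foo_timer_set_oneshot(struct clock_event_device *evt)
{
	/* Put the hardware into single-shot mode. */
	return 0;
}

static int foo_timer_set_next_event(unsigned long delta,
				    struct clock_event_device *evt)
{
	/* Arm the comparator 'delta' timer ticks into the future. */
	return 0;
}

static struct clock_event_device foo_timer_clockevent = {
	.name			= "foo-timer",
	.rating			= 300,
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= foo_timer_shutdown,
	.set_state_oneshot	= foo_timer_set_oneshot,
	.set_next_event		= foo_timer_set_next_event,
};

static void __init foo_timer_init(void)
{
	foo_timer_clockevent.cpumask = cpumask_of(0);
	/* min/max delta are in timer ticks; the core derives mult/shift. */
	clockevents_config_and_register(&foo_timer_clockevent,
					1000000, 2, 0xffffffff);
}

Note that a driver provides either ->set_mode() or the set_state_*() group, never both; the core rejects devices that mix them.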
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 9c78d15d33e4..135509821c39 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -56,6 +56,7 @@ struct module;
  * @shift:		cycle to nanosecond divisor (power of two)
  * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
  * @maxadj:		maximum adjustment value to mult (~11%)
+ * @max_cycles:		maximum safe cycle value which won't overflow on multiplication
  * @flags:		flags describing special properties
  * @archdata:		arch-specific data
  * @suspend:		suspend function for the clocksource, if necessary
@@ -76,7 +77,7 @@ struct clocksource {
 #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
 	struct arch_clocksource_data archdata;
 #endif
-
+	u64 max_cycles;
 	const char *name;
 	struct list_head list;
 	int rating;
@@ -178,7 +179,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
 }
 
 
-extern int clocksource_register(struct clocksource*);
 extern int clocksource_unregister(struct clocksource*);
 extern void clocksource_touch_watchdog(void);
 extern struct clocksource* clocksource_get_next(void);
@@ -189,7 +189,7 @@ extern struct clocksource * __init clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);
 
 extern u64
-clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask);
+clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
 extern void
 clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
 
@@ -200,7 +200,16 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
 extern int
 __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
 extern void
-__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);
+__clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq);
+
+/*
+ * Don't call this unless you are a default clocksource
+ * (AKA: jiffies) and absolutely have to.
+ */
+static inline int __clocksource_register(struct clocksource *cs)
+{
+	return __clocksource_register_scale(cs, 1, 0);
+}
 
 static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
 {
@@ -212,14 +221,14 @@ static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
 	return __clocksource_register_scale(cs, 1000, khz);
 }
 
-static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
+static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz)
 {
-	__clocksource_updatefreq_scale(cs, 1, hz);
+	__clocksource_update_freq_scale(cs, 1, hz);
 }
 
-static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
+static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz)
 {
-	__clocksource_updatefreq_scale(cs, 1000, khz);
+	__clocksource_update_freq_scale(cs, 1000, khz);
 }
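Usage note for the clocksource changes: the bare clocksource_register() is gone, so registration goes through the frequency-scaled helpers, which now also derive the new max_cycles overflow limit. The sketch below is illustrative only and not taken from this merge; the "foo-counter" device, its read callback and the 24 MHz rate are invented, and only clocksource_register_hz() plus the standard mask/flag macros are assumed from the API above.

/* Illustrative sketch only: registering a hypothetical free-running counter. */
#include <linux/clocksource.h>
#include <linux/init.h>

static cycle_t foo_counter_read(struct clocksource *cs)
{
	/* Return the current value of the hardware counter here. */
	return 0;
}

static struct clocksource foo_clocksource = {
	.name	= "foo-counter",
	.rating	= 300,
	.read	= foo_counter_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init foo_clocksource_init(void)
{
	/* The core computes mult/shift, max_idle_ns and max_cycles from 24 MHz. */
	return clocksource_register_hz(&foo_clocksource, 24000000);
}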
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index dcad7ee0d746..8dcf6825fa88 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -77,6 +77,7 @@ struct rtc_class_ops {
 	int (*read_alarm)(struct device *, struct rtc_wkalrm *);
 	int (*set_alarm)(struct device *, struct rtc_wkalrm *);
 	int (*proc)(struct device *, struct seq_file *);
+	int (*set_mmss64)(struct device *, time64_t secs);
 	int (*set_mmss)(struct device *, unsigned long secs);
 	int (*read_callback)(struct device *, int data);
 	int (*alarm_irq_enable)(struct device *, unsigned int enabled);
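The new set_mmss64 callback lets an RTC driver accept a 64-bit seconds value and so avoid the 2038 truncation inherent in set_mmss. A hypothetical sketch (invented foo_rtc_* names, hardware access stubbed out):

/* Illustrative sketch only: an RTC driver offering the 64-bit callback. */
#include <linux/rtc.h>

static int foo_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	/* Read the hardware seconds counter and convert it. */
	rtc_time64_to_tm(0, tm);
	return 0;
}

static int foo_rtc_set_mmss64(struct device *dev, time64_t secs)
{
	/* Write 'secs' to the hardware; no year-2038 truncation here. */
	return 0;
}

static const struct rtc_class_ops foo_rtc_ops = {
	.read_time	= foo_rtc_read_time,
	.set_mmss64	= foo_rtc_set_mmss64,
};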
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9c085dc12ae9..f8492da57ad3 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -1,7 +1,5 @@
-/* linux/include/linux/tick.h
- *
- * This file contains the structure definitions for tick related functions
- *
+/*
+ * Tick related global functions
  */
 #ifndef _LINUX_TICK_H
 #define _LINUX_TICK_H
@@ -9,149 +7,99 @@
 #include <linux/clockchips.h>
 #include <linux/irqflags.h>
 #include <linux/percpu.h>
-#include <linux/hrtimer.h>
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
 #include <linux/sched.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
-
-enum tick_device_mode {
-	TICKDEV_MODE_PERIODIC,
-	TICKDEV_MODE_ONESHOT,
-};
-
-struct tick_device {
-	struct clock_event_device *evtdev;
-	enum tick_device_mode mode;
-};
-
-enum tick_nohz_mode {
-	NOHZ_MODE_INACTIVE,
-	NOHZ_MODE_LOWRES,
-	NOHZ_MODE_HIGHRES,
-};
-
-/**
- * struct tick_sched - sched tick emulation and no idle tick control/stats
- * @sched_timer:	hrtimer to schedule the periodic tick in high
- *			resolution mode
- * @last_tick:		Store the last tick expiry time when the tick
- *			timer is modified for nohz sleeps. This is necessary
- *			to resume the tick timer operation in the timeline
- *			when the CPU returns from nohz sleep.
- * @tick_stopped:	Indicator that the idle tick has been stopped
- * @idle_jiffies:	jiffies at the entry to idle for idle time accounting
- * @idle_calls:		Total number of idle calls
- * @idle_sleeps:	Number of idle calls, where the sched tick was stopped
- * @idle_entrytime:	Time when the idle call was entered
- * @idle_waketime:	Time when the idle was interrupted
- * @idle_exittime:	Time when the idle state was left
- * @idle_sleeptime:	Sum of the time slept in idle with sched tick stopped
- * @iowait_sleeptime:	Sum of the time slept in idle with sched tick stopped, with IO outstanding
- * @sleep_length:	Duration of the current idle sleep
- * @do_timer_lst:	CPU was the last one doing do_timer before going idle
- */
-struct tick_sched {
-	struct hrtimer			sched_timer;
-	unsigned long			check_clocks;
-	enum tick_nohz_mode		nohz_mode;
-	ktime_t				last_tick;
-	int				inidle;
-	int				tick_stopped;
-	unsigned long			idle_jiffies;
-	unsigned long			idle_calls;
-	unsigned long			idle_sleeps;
-	int				idle_active;
-	ktime_t				idle_entrytime;
-	ktime_t				idle_waketime;
-	ktime_t				idle_exittime;
-	ktime_t				idle_sleeptime;
-	ktime_t				iowait_sleeptime;
-	ktime_t				sleep_length;
-	unsigned long			last_jiffies;
-	unsigned long			next_jiffies;
-	ktime_t				idle_expires;
-	int				do_timer_last;
-};
-
 extern void __init tick_init(void);
-extern int tick_is_oneshot_available(void);
-extern struct tick_device *tick_get_device(int cpu);
-
 extern void tick_freeze(void);
 extern void tick_unfreeze(void);
+/* Should be core only, but ARM BL switcher requires it */
+extern void tick_suspend_local(void);
+/* Should be core only, but XEN resume magic and ARM BL switcher require it */
+extern void tick_resume_local(void);
+extern void tick_handover_do_timer(void);
+extern void tick_cleanup_dead_cpu(int cpu);
+#else /* CONFIG_GENERIC_CLOCKEVENTS */
+static inline void tick_init(void) { }
+static inline void tick_freeze(void) { }
+static inline void tick_unfreeze(void) { }
+static inline void tick_suspend_local(void) { }
+static inline void tick_resume_local(void) { }
+static inline void tick_handover_do_timer(void) { }
+static inline void tick_cleanup_dead_cpu(int cpu) { }
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
-# ifdef CONFIG_HIGH_RES_TIMERS
-extern int tick_init_highres(void);
-extern int tick_program_event(ktime_t expires, int force);
-extern void tick_setup_sched_timer(void);
-# endif
-
-# if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
-extern void tick_cancel_sched_timer(int cpu);
-# else
-static inline void tick_cancel_sched_timer(int cpu) { }
-# endif
-
-# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-extern struct tick_device *tick_get_broadcast_device(void);
-extern struct cpumask *tick_get_broadcast_mask(void);
-
-# ifdef CONFIG_TICK_ONESHOT
-extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
-# endif
-
-# endif /* BROADCAST */
-
-# ifdef CONFIG_TICK_ONESHOT
-extern void tick_clock_notify(void);
-extern int tick_check_oneshot_change(int allow_nohz);
-extern struct tick_sched *tick_get_tick_sched(int cpu);
+#ifdef CONFIG_TICK_ONESHOT
 extern void tick_irq_enter(void);
-extern int tick_oneshot_mode_active(void);
 #  ifndef arch_needs_cpu
 #  define arch_needs_cpu() (0)
 #  endif
 # else
-static inline void tick_clock_notify(void) { }
-static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
 static inline void tick_irq_enter(void) { }
-static inline int tick_oneshot_mode_active(void) { return 0; }
-# endif
+#endif
 
-#else /* CONFIG_GENERIC_CLOCKEVENTS */
-static inline void tick_init(void) { }
-static inline void tick_freeze(void) { }
-static inline void tick_unfreeze(void) { }
-static inline void tick_cancel_sched_timer(int cpu) { }
-static inline void tick_clock_notify(void) { }
-static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_irq_enter(void) { }
-static inline int tick_oneshot_mode_active(void) { return 0; }
-#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
+#else
+static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
+#endif
 
-# ifdef CONFIG_NO_HZ_COMMON
-DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);
+enum tick_broadcast_mode {
+	TICK_BROADCAST_OFF,
+	TICK_BROADCAST_ON,
+	TICK_BROADCAST_FORCE,
+};
+
+enum tick_broadcast_state {
+	TICK_BROADCAST_EXIT,
+	TICK_BROADCAST_ENTER,
+};
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern void tick_broadcast_control(enum tick_broadcast_mode mode);
+#else
+static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
+#endif /* BROADCAST */
+
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
+#else
+static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; }
+#endif
 
-static inline int tick_nohz_tick_stopped(void)
+static inline void tick_broadcast_enable(void)
+{
+	tick_broadcast_control(TICK_BROADCAST_ON);
+}
+static inline void tick_broadcast_disable(void)
+{
+	tick_broadcast_control(TICK_BROADCAST_OFF);
+}
+static inline void tick_broadcast_force(void)
+{
+	tick_broadcast_control(TICK_BROADCAST_FORCE);
+}
+static inline int tick_broadcast_enter(void)
 {
-	return __this_cpu_read(tick_cpu_sched.tick_stopped);
+	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
+}
+static inline void tick_broadcast_exit(void)
+{
+	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
 }
 
+#ifdef CONFIG_NO_HZ_COMMON
+extern int tick_nohz_tick_stopped(void);
 extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
-
-# else /* !CONFIG_NO_HZ_COMMON */
-static inline int tick_nohz_tick_stopped(void)
-{
-	return 0;
-}
-
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline int tick_nohz_tick_stopped(void) { return 0; }
 static inline void tick_nohz_idle_enter(void) { }
 static inline void tick_nohz_idle_exit(void) { }
@@ -163,7 +111,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
 }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
 static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
-# endif /* !CONFIG_NO_HZ_COMMON */
+#endif /* !CONFIG_NO_HZ_COMMON */
 
 #ifdef CONFIG_NO_HZ_FULL
 extern bool tick_nohz_full_running;
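The tick.h rework replaces the clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_*) calls with typed helpers: tick_broadcast_enable()/disable()/force() to switch broadcast handling for a CPU, and tick_broadcast_enter()/exit() around idle states in which the local timer stops. The sketch below is illustrative only, showing how platform idle code might use them; the foo_* names are invented.

/* Illustrative sketch only: hypothetical platform idle code. */
#include <linux/tick.h>
#include <linux/init.h>

static void foo_cpu_do_idle(void)
{
	/* Architecture specific low power entry would go here. */
}

static void foo_enter_deep_idle(void)
{
	/*
	 * The local timer stops in this state (CLOCK_EVT_FEAT_C3STOP),
	 * so hand wakeups over to the broadcast device first.
	 */
	if (tick_broadcast_enter()) {
		/* Broadcast not possible; fall back to a shallower state. */
		return;
	}

	foo_cpu_do_idle();

	/* Take this CPU back out of broadcast mode after wakeup. */
	tick_broadcast_exit();
}

static void __init foo_idle_init(void)
{
	/* Formerly CLOCK_EVT_NOTIFY_BROADCAST_ON via clockevents_notify(). */
	tick_broadcast_enable();
}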
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 05af9a334893..fb86963859c7 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -16,16 +16,16 @@
  * @read:		Read function of @clock
  * @mask:		Bitmask for two's complement subtraction of non 64bit clocks
  * @cycle_last:		@clock cycle value at last update
- * @mult:		NTP adjusted multiplier for scaled math conversion
+ * @mult:		(NTP adjusted) multiplier for scaled math conversion
  * @shift:		Shift value for scaled math conversion
  * @xtime_nsec:		Shifted (fractional) nano seconds offset for readout
- * @base_mono:		ktime_t (nanoseconds) base time for readout
+ * @base:		ktime_t (nanoseconds) base time for readout
  *
  * This struct has size 56 byte on 64 bit. Together with a seqcount it
  * occupies a single 64byte cache line.
  *
  * The struct is separate from struct timekeeper as it is also used
- * for a fast NMI safe accessor to clock monotonic.
+ * for a fast NMI safe accessors.
  */
 struct tk_read_base {
 	struct clocksource	*clock;
@@ -35,12 +35,13 @@ struct tk_read_base {
 	u32			mult;
 	u32			shift;
 	u64			xtime_nsec;
-	ktime_t			base_mono;
+	ktime_t			base;
 };
 
 /**
  * struct timekeeper - Structure holding internal timekeeping values.
- * @tkr:		The readout base structure
+ * @tkr_mono:		The readout base structure for CLOCK_MONOTONIC
+ * @tkr_raw:		The readout base structure for CLOCK_MONOTONIC_RAW
  * @xtime_sec:		Current CLOCK_REALTIME time in seconds
  * @ktime_sec:		Current CLOCK_MONOTONIC time in seconds
  * @wall_to_monotonic:	CLOCK_REALTIME to CLOCK_MONOTONIC offset
@@ -48,7 +49,6 @@ struct tk_read_base {
  * @offs_boot:		Offset clock monotonic -> clock boottime
  * @offs_tai:		Offset clock monotonic -> clock tai
  * @tai_offset:		The current UTC to TAI offset in seconds
- * @base_raw:		Monotonic raw base time in ktime_t format
  * @raw_time:		Monotonic raw base time in timespec64 format
  * @cycle_interval:	Number of clock cycles in one NTP interval
  * @xtime_interval:	Number of clock shifted nano seconds in one NTP
@@ -76,7 +76,8 @@ struct tk_read_base {
  * used instead.
  */
 struct timekeeper {
-	struct tk_read_base	tkr;
+	struct tk_read_base	tkr_mono;
+	struct tk_read_base	tkr_raw;
 	u64			xtime_sec;
 	unsigned long		ktime_sec;
 	struct timespec64	wall_to_monotonic;
@@ -84,7 +85,6 @@ struct tk_read_base {
 	ktime_t			offs_boot;
 	ktime_t			offs_tai;
 	s32			tai_offset;
-	ktime_t			base_raw;
 	struct timespec64	raw_time;
 
 	/* The following members are for timekeeping internal use */
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 3eaae4754275..99176af216af 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -214,12 +214,18 @@ static inline u64 ktime_get_boot_ns(void)
 	return ktime_to_ns(ktime_get_boottime());
 }
 
+static inline u64 ktime_get_tai_ns(void)
+{
+	return ktime_to_ns(ktime_get_clocktai());
+}
+
 static inline u64 ktime_get_raw_ns(void)
 {
 	return ktime_to_ns(ktime_get_raw());
 }
 
 extern u64 ktime_get_mono_fast_ns(void);
+extern u64 ktime_get_raw_fast_ns(void);
 
 /*
  * Timespec interfaces utilizing the ktime based ones
@@ -242,6 +248,9 @@ static inline void timekeeping_clocktai(struct timespec *ts)
 /*
  * RTC specific
  */
+extern bool timekeeping_rtc_skipsuspend(void);
+extern bool timekeeping_rtc_skipresume(void);
+
 extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
 
 /*
@@ -253,17 +262,14 @@ extern void getnstime_raw_and_real(struct timespec *ts_raw,
 /*
  * Persistent clock related interfaces
  */
-extern bool persistent_clock_exist;
 extern int persistent_clock_is_local;
 
-static inline bool has_persistent_clock(void)
-{
-	return persistent_clock_exist;
-}
-
 extern void read_persistent_clock(struct timespec *ts);
+extern void read_persistent_clock64(struct timespec64 *ts);
 extern void read_boot_clock(struct timespec *ts);
+extern void read_boot_clock64(struct timespec64 *ts);
 extern int update_persistent_clock(struct timespec now);
+extern int update_persistent_clock64(struct timespec64 now);
 
 #endif
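timekeeping.h gains ktime_get_tai_ns() plus a raw counterpart to the NMI-safe fast accessor, backed by the tkr_mono/tkr_raw split in timekeeper_internal.h above. A small illustrative sketch (the foo_sample record and foo_take_sample() are invented) of taking timestamps with the new accessors; the fast accessors are usable from NMI context but, per their documentation, may be slightly non-monotonic across a timekeeper update:

/* Illustrative sketch only: timestamping with the new accessors. */
#include <linux/timekeeping.h>
#include <linux/types.h>

struct foo_sample {			/* hypothetical debug record */
	u64	mono_ns;
	u64	raw_ns;
	u64	tai_ns;
};

static void foo_take_sample(struct foo_sample *s)
{
	s->mono_ns = ktime_get_mono_fast_ns();	/* CLOCK_MONOTONIC, NMI safe */
	s->raw_ns  = ktime_get_raw_fast_ns();	/* CLOCK_MONOTONIC_RAW, NMI safe */
	s->tai_ns  = ktime_get_tai_ns();	/* CLOCK_TAI, normal context only */
}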