From b0797b60d0067fe437baa97a743c7d9de98fd769 Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Tue, 13 Jul 2010 17:56:21 -0700
Subject: powerpc: Simplify update_vsyscall

Currently powerpc's update_vsyscall calls an inline update_gtod.
However, both are straightforward, and there are no other users, so
this patch merges update_gtod into update_vsyscall.

Signed-off-by: John Stultz
Cc: Anton Blanchard
Cc: Paul Mackerras
LKML-Reference: <1279068988-21864-5-git-send-email-johnstul@us.ibm.com>
Signed-off-by: Thomas Gleixner
---
 arch/powerpc/kernel/time.c | 55 +++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 30 deletions(-)

(limited to 'arch/powerpc/kernel/time.c')

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0441bbdadbd1..6fcd64886d1b 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -423,30 +423,6 @@ void udelay(unsigned long usecs)
 }
 EXPORT_SYMBOL(udelay);
 
-static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
-			       u64 new_tb_to_xs)
-{
-	/*
-	 * tb_update_count is used to allow the userspace gettimeofday code
-	 * to assure itself that it sees a consistent view of the tb_to_xs and
-	 * stamp_xsec variables. It reads the tb_update_count, then reads
-	 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
-	 * the two values of tb_update_count match and are even then the
-	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
-	 * loops back and reads them again until this criteria is met.
-	 * We expect the caller to have done the first increment of
-	 * vdso_data->tb_update_count already.
-	 */
-	vdso_data->tb_orig_stamp = new_tb_stamp;
-	vdso_data->stamp_xsec = new_stamp_xsec;
-	vdso_data->tb_to_xs = new_tb_to_xs;
-	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
-	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
-	vdso_data->stamp_xtime = xtime;
-	smp_wmb();
-	++(vdso_data->tb_update_count);
-}
-
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
@@ -876,7 +852,7 @@ static cycle_t timebase_read(struct clocksource *cs)
 void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 		     u32 mult)
 {
-	u64 t2x, stamp_xsec;
+	u64 new_tb_to_xs, new_stamp_xsec;
 
 	if (clock != &clocksource_timebase)
 		return;
@@ -887,11 +863,30 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 
 	/* XXX this assumes clock->shift == 22 */
 	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
-	t2x = (u64) mult * 4611686018ULL;
-	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
-	do_div(stamp_xsec, 1000000000);
-	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
-	update_gtod(clock->cycle_last, stamp_xsec, t2x);
+	new_tb_to_xs = (u64) mult * 4611686018ULL;
+	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+	do_div(new_stamp_xsec, 1000000000);
+	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+
+	/*
+	 * tb_update_count is used to allow the userspace gettimeofday code
+	 * to assure itself that it sees a consistent view of the tb_to_xs and
+	 * stamp_xsec variables. It reads the tb_update_count, then reads
+	 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
+	 * the two values of tb_update_count match and are even then the
+	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
+	 * loops back and reads them again until this criteria is met.
+	 * We expect the caller to have done the first increment of
+	 * vdso_data->tb_update_count already.
+	 */
+	vdso_data->tb_orig_stamp = clock->cycle_last;
+	vdso_data->stamp_xsec = new_stamp_xsec;
+	vdso_data->tb_to_xs = new_tb_to_xs;
+	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
+	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+	vdso_data->stamp_xtime = xtime;
+	smp_wmb();
+	++(vdso_data->tb_update_count);
 }
 
 void update_vsyscall_tz(void)
--
cgit v1.2.1

From 06d518e3dfb25334282c7e38b4d7a4eada215f6d Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Tue, 13 Jul 2010 17:56:22 -0700
Subject: powerpc: Cleanup xtime usage

This removes powerpc's direct xtime usage, allowing for further
generic timekeeping cleanups.

Signed-off-by: John Stultz
Cc: Paul Mackerras
Cc: Anton Blanchard
LKML-Reference: <1279068988-21864-6-git-send-email-johnstul@us.ibm.com>
Signed-off-by: Thomas Gleixner
---
 arch/powerpc/kernel/time.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/kernel/time.c')

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6fcd64886d1b..0711d60f40b0 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -864,9 +864,9 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 
 	/* XXX this assumes clock->shift == 22 */
 	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
 	new_tb_to_xs = (u64) mult * 4611686018ULL;
-	new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+	new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
 	do_div(new_stamp_xsec, 1000000000);
-	new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
 
 	/*
 	 * tb_update_count is used to allow the userspace gettimeofday code
@@ -884,7 +884,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 	vdso_data->tb_to_xs = new_tb_to_xs;
 	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
 	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
-	vdso_data->stamp_xtime = xtime;
+	vdso_data->stamp_xtime = *wall_time;
 	smp_wmb();
 	++(vdso_data->tb_update_count);
 }
@@ -1093,7 +1093,7 @@ void __init time_init(void)
 	vdso_data->tb_orig_stamp = tb_last_jiffy;
 	vdso_data->tb_update_count = 0;
 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
+	vdso_data->stamp_xsec = (u64) get_seconds() * XSEC_PER_SEC;
 	vdso_data->tb_to_xs = tb_to_xs;
 
 	write_sequnlock_irqrestore(&xtime_lock, flags);
--
cgit v1.2.1

From 7615856ebfee52b080c22d263ca4debbd0df0ac1 Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Tue, 13 Jul 2010 17:56:23 -0700
Subject: timekeeping: Fix update_vsyscall to provide wall_to_monotonic offset

update_vsyscall() did not provide the wall_to_monotonic offset, so
arch specific implementations tend to reference wall_to_monotonic
directly.
This limits future cleanups in the timekeeping core, so this patch
fixes the update_vsyscall interface to provide wall_to_monotonic,
allowing wall_to_monotonic to be made static as planned in
Documentation/feature-removal-schedule.txt

Signed-off-by: John Stultz
Cc: Martin Schwidefsky
Cc: Anton Blanchard
Cc: Paul Mackerras
Cc: Tony Luck
LKML-Reference: <1279068988-21864-7-git-send-email-johnstul@us.ibm.com>
Signed-off-by: Thomas Gleixner
---
 arch/powerpc/kernel/time.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/kernel/time.c')

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0711d60f40b0..e215f76bba1c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -849,8 +849,8 @@ static cycle_t timebase_read(struct clocksource *cs)
 	return (cycle_t)get_tb();
 }
 
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
-		     u32 mult)
+void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
+		     struct clocksource *clock, u32 mult)
 {
 	u64 new_tb_to_xs, new_stamp_xsec;
 
@@ -882,8 +882,8 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 	vdso_data->tb_orig_stamp = clock->cycle_last;
 	vdso_data->stamp_xsec = new_stamp_xsec;
 	vdso_data->tb_to_xs = new_tb_to_xs;
-	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
-	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+	vdso_data->wtom_clock_sec = wtm->tv_sec;
+	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
 	vdso_data->stamp_xtime = *wall_time;
 	smp_wmb();
 	++(vdso_data->tb_update_count);
--
cgit v1.2.1

From 0e469db8f70c2645acdc90981c0480a3e19d5e68 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Sun, 20 Jun 2010 19:03:08 +0000
Subject: powerpc: Rework VDSO gettimeofday to prevent time going backwards

Currently it is possible for userspace to see the result of
gettimeofday() going backwards by 1 microsecond, assuming that
userspace is using the gettimeofday() in the VDSO. The VDSO
gettimeofday() algorithm computes the time in "xsecs", which are
units of 2^-20 seconds, or approximately 0.954 microseconds,
using the algorithm

	now = (timebase - tb_orig_stamp) * tb_to_xs + stamp_xsec

and then converts the time in xsecs to seconds and microseconds.

The kernel updates the tb_orig_stamp and stamp_xsec values every
tick in update_vsyscall(). If the length of the tick is not an
integer number of xsecs, then some precision is lost in converting
the current time to xsecs. For example, with CONFIG_HZ=1000, the
tick is 1ms long, which is 1048.576 xsecs. That means that
stamp_xsec will advance by either 1048 or 1049 on each tick.
With the right conditions, it is possible for userspace to get
(timebase - tb_orig_stamp) * tb_to_xs being 1049 if the kernel is
slightly late in updating the vdso_datapage, and then for stamp_xsec
to advance by 1048 when the kernel does update it, and for userspace
to then see (timebase - tb_orig_stamp) * tb_to_xs being zero due to
integer truncation. The result is that time appears to go backwards
by 1 microsecond.

To fix this we change the VDSO gettimeofday to use a new field in the
VDSO datapage which stores the nanoseconds part of the time as a
fractional number of seconds in a 0.32 binary fraction format.
(Or put another way, as a 32-bit number in units of 0.23283 ns.)
This is convenient because we can use the mulhwu instruction to
convert it to either microseconds or nanoseconds.
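To make the arithmetic concrete, here is a stand-alone user-space
sketch of that conversion (an illustration, not code from this patch;
the names and sample value are invented). It builds the 0.32 fraction
from tv_nsec the same way the patch does, then recovers microseconds
and nanoseconds with the high-half 32x32 multiply that mulhwu performs:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tv_nsec = 123456789;	/* nanoseconds part of the wall time */

	/* tv_nsec / 1e9 as a 0.32 binary fraction.
	 * 18446744073ULL is floor(2^64 / 1e9), as used in the patch;
	 * the product fits in 64 bits because tv_nsec < 1e9. */
	uint32_t frac_sec = ((uint64_t)tv_nsec * 18446744073ULL) >> 32;

	/* mulhwu-style conversion: keep the high 32 bits of a 32x32
	 * multiply by the desired resolution. */
	uint32_t usec = ((uint64_t)frac_sec * 1000000) >> 32;
	uint32_t nsec = ((uint64_t)frac_sec * 1000000000) >> 32;

	/* prints usec=123456 and nsec=123456788; both conversions round
	 * down, so the error stays below one unit of the resolution. */
	printf("frac_sec=%u usec=%u nsec=%u\n", frac_sec, usec, nsec);
	return 0;
}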
Since it turns out that computing the time of day using this new
field is simpler than either using stamp_xsec (as gettimeofday does)
or stamp_xtime.tv_nsec (as clock_gettime does), this converts both
gettimeofday and clock_gettime to use the new field. The existing
__do_get_tspec function is converted to use the new field and take
a parameter in r7 that indicates the desired resolution, 1,000,000
for microseconds or 1,000,000,000 for nanoseconds. The __do_get_xsec
function is then unused and is deleted.

The new algorithm is

	now = ((timebase - tb_orig_stamp) << 12) * tb_to_xs
		+ (stamp_xtime_seconds << 32) + stamp_sec_fraction

with 'now' in units of 2^-32 seconds. That is then converted to
seconds and either microseconds or nanoseconds with

	seconds = now >> 32
	partseconds = ((now & 0xffffffff) * resolution) >> 32

The 32-bit VDSO code also makes a further simplification: it ignores
the bottom 32 bits of the tb_to_xs value, which is a 0.64 format binary
fraction. Doing so gets rid of 4 multiply instructions. Assuming a
timebase frequency of 1GHz or less and an update interval of no more
than 10ms, the upper 32 bits of tb_to_xs will be at least 4503599, so
the error from ignoring the low 32 bits will be at most 2.2ns, which
is more than an order of magnitude less than the time taken to do
gettimeofday or clock_gettime on our fastest processors, so there is
no possibility of seeing inconsistent values due to this.

This also moves update_gtod() down next to its only caller, and makes
update_vsyscall use the time passed in via the wall_time argument
rather than accessing xtime directly. At present, wall_time always
points to xtime, but that could change in future.

Signed-off-by: Paul Mackerras
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/time.c | 61 ++++++++++++++++++++++--------------------
 1 file changed, 34 insertions(+), 27 deletions(-)

(limited to 'arch/powerpc/kernel/time.c')

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0441bbdadbd1..5adebaf47f13 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -423,30 +423,6 @@ void udelay(unsigned long usecs)
 }
 EXPORT_SYMBOL(udelay);
 
-static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
-			       u64 new_tb_to_xs)
-{
-	/*
-	 * tb_update_count is used to allow the userspace gettimeofday code
-	 * to assure itself that it sees a consistent view of the tb_to_xs and
-	 * stamp_xsec variables. It reads the tb_update_count, then reads
-	 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
-	 * the two values of tb_update_count match and are even then the
-	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
-	 * loops back and reads them again until this criteria is met.
-	 * We expect the caller to have done the first increment of
-	 * vdso_data->tb_update_count already.
-	 */
-	vdso_data->tb_orig_stamp = new_tb_stamp;
-	vdso_data->stamp_xsec = new_stamp_xsec;
-	vdso_data->tb_to_xs = new_tb_to_xs;
-	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
-	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
-	vdso_data->stamp_xtime = xtime;
-	smp_wmb();
-	++(vdso_data->tb_update_count);
-}
-
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
@@ -873,10 +849,37 @@ static cycle_t timebase_read(struct clocksource *cs)
 	return (cycle_t)get_tb();
 }
 
+static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
+			       u64 new_tb_to_xs, struct timespec *now,
+			       u32 frac_sec)
+{
+	/*
+	 * tb_update_count is used to allow the userspace gettimeofday code
+	 * to assure itself that it sees a consistent view of the tb_to_xs and
+	 * stamp_xsec variables. It reads the tb_update_count, then reads
+	 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
+	 * the two values of tb_update_count match and are even then the
+	 * tb_to_xs and stamp_xsec values are consistent. If not, then it
+	 * loops back and reads them again until this criteria is met.
+	 * We expect the caller to have done the first increment of
+	 * vdso_data->tb_update_count already.
+	 */
+	vdso_data->tb_orig_stamp = new_tb_stamp;
+	vdso_data->stamp_xsec = new_stamp_xsec;
+	vdso_data->tb_to_xs = new_tb_to_xs;
+	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
+	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+	vdso_data->stamp_xtime = *now;
+	vdso_data->stamp_sec_fraction = frac_sec;
+	smp_wmb();
+	++(vdso_data->tb_update_count);
+}
+
 void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 		     u32 mult)
 {
 	u64 t2x, stamp_xsec;
+	u32 frac_sec;
 
 	if (clock != &clocksource_timebase)
 		return;
@@ -888,10 +891,14 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
 
 	/* XXX this assumes clock->shift == 22 */
 	/* 4611686018 ~= 2^(20+64-22) / 1e9 */
 	t2x = (u64) mult * 4611686018ULL;
-	stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+	stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
 	do_div(stamp_xsec, 1000000000);
-	stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
-	update_gtod(clock->cycle_last, stamp_xsec, t2x);
+	stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
+
+	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
+	/* this is tv_nsec / 1e9 as a 0.32 fraction */
+	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
+	update_gtod(clock->cycle_last, stamp_xsec, t2x, wall_time, frac_sec);
 }
 
 void update_vsyscall_tz(void)
--
cgit v1.2.1

From d75d68cfef4936ddf38d2694ae2f7d1f7c45db05 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Sun, 20 Jun 2010 19:04:14 +0000
Subject: powerpc: Clean up obsolete code relating to decrementer and timebase

Since the decrementer and timekeeping code was moved over to using
the generic clockevents and timekeeping infrastructure, several
variables and functions have been obsolete and effectively unused.
This deletes them.

In particular, wakeup_decrementer() is no longer needed since the
generic code reprograms the decrementer as part of the process of
resuming the timekeeping code, which happens during sysdev resume.
Thus the wakeup_decrementer calls in the suspend_enter methods for
52xx platforms have been removed. The call in the powermac cpu
frequency change code has been replaced by set_dec(1), which will
cause a timer interrupt as soon as interrupts are enabled, and the
generic code will then reprogram the decrementer with the correct
value.
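As an aside, the tb_update_count discipline described in the
update_gtod() comment above is a classic sequence counter. The
following self-contained C model of the writer and reader sides is
illustrative only (invented names, single-threaded demo, kernel
barriers approximated by __sync_synchronize()); it shows why a reader
that sees matching, even counts has a consistent snapshot:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the vdso_data fields; not the real layout. */
static volatile uint32_t tb_update_count;
static volatile uint64_t stamp_xsec, tb_to_xs;

/* Writer, mirroring the update path: bump the count, update the data,
 * barrier, bump again, so the count is even only while data is stable. */
static void write_side(uint64_t new_xsec, uint64_t new_t2x)
{
	++tb_update_count;
	__sync_synchronize();
	stamp_xsec = new_xsec;
	tb_to_xs = new_t2x;
	__sync_synchronize();	/* stands in for smp_wmb() */
	++tb_update_count;
}

/* Reader: retry until both reads of the count match and are even. */
static void read_side(uint64_t *xsec, uint64_t *t2x)
{
	uint32_t count;

	do {
		count = tb_update_count;
		__sync_synchronize();
		*xsec = stamp_xsec;
		*t2x = tb_to_xs;
		__sync_synchronize();
	} while ((count & 1) || count != tb_update_count);
}

int main(void)
{
	uint64_t a, b;

	write_side(123, 456);
	read_side(&a, &b);
	printf("stamp_xsec=%llu tb_to_xs=%llu\n",
	       (unsigned long long)a, (unsigned long long)b);
	return 0;
}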
This also simplifies the generic_suspend_en/disable_irqs functions
and makes them static since they are not referenced outside time.c.
The preempt_enable/disable calls are removed because the generic
code has disabled all but the boot cpu at the point where these
functions are called, so we can't be moved to another cpu.

Signed-off-by: Paul Mackerras
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/time.c | 136 +--------------------------------------------
 1 file changed, 3 insertions(+), 133 deletions(-)

(limited to 'arch/powerpc/kernel/time.c')

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5adebaf47f13..ccb8759c8532 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -149,16 +149,6 @@ unsigned long tb_ticks_per_usec = 100; /* sane default */
 EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
 EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
-u64 tb_to_xs;
-unsigned tb_to_us;
-
-#define TICKLEN_SCALE	NTP_SCALE_SHIFT
-static u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
-static u64 ticklen_to_xs;	/* 0.64 fraction */
-
-/* If last_tick_len corresponds to about 1/HZ seconds, then
-   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
-#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
 
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
@@ -174,7 +164,6 @@ unsigned long ppc_proc_freq;
 EXPORT_SYMBOL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
 
-static u64 tb_last_jiffy __cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(u64, last_jiffy);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -446,7 +435,6 @@ EXPORT_SYMBOL(profile_pc);
 
 static int __init iSeries_tb_recal(void)
 {
-	struct div_result divres;
 	unsigned long titan, tb;
 
 	/* Make sure we only run on iSeries */
@@ -477,10 +465,7 @@ static int __init iSeries_tb_recal(void)
 			tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
 			tb_ticks_per_sec = new_tb_ticks_per_sec;
 			calc_cputime_factors();
-			div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
-			tb_to_xs = divres.result_low;
 			vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-			vdso_data->tb_to_xs = tb_to_xs;
 			setup_cputime_one_jiffy();
 		}
 		else {
@@ -643,27 +628,9 @@ void timer_interrupt(struct pt_regs * regs)
 	trace_timer_interrupt_exit(regs);
 }
 
-void wakeup_decrementer(void)
-{
-	unsigned long ticks;
-
-	/*
-	 * The timebase gets saved on sleep and restored on wakeup,
-	 * so all we need to do is to reset the decrementer.
-	 */
-	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
-	if (ticks < tb_ticks_per_jiffy)
-		ticks = tb_ticks_per_jiffy - ticks;
-	else
-		ticks = 1;
-	set_dec(ticks);
-}
-
 #ifdef CONFIG_SUSPEND
-void generic_suspend_disable_irqs(void)
+static void generic_suspend_disable_irqs(void)
 {
-	preempt_disable();
-
 	/* Disable the decrementer, so that it doesn't interfere
 	 * with suspending.
	 */
@@ -673,12 +640,9 @@ void generic_suspend_disable_irqs(void)
 	set_dec(0x7fffffff);
 }
 
-void generic_suspend_enable_irqs(void)
+static void generic_suspend_enable_irqs(void)
 {
-	wakeup_decrementer();
-
 	local_irq_enable();
-	preempt_enable();
 }
 
 /* Overrides the weak version in kernel/power/main.c */
@@ -698,23 +662,6 @@ void arch_suspend_enable_irqs(void)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void __init smp_space_timers(unsigned int max_cpus)
-{
-	int i;
-	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
-
-	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
-	previous_tb -= tb_ticks_per_jiffy;
-
-	for_each_possible_cpu(i) {
-		if (i == boot_cpuid)
-			continue;
-		per_cpu(last_jiffy, i) = previous_tb;
-	}
-}
-#endif
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  *
@@ -1014,15 +961,13 @@ void secondary_cpu_time_init(void)
 /* This function is only called on the boot processor */
 void __init time_init(void)
 {
-	unsigned long flags;
 	struct div_result res;
-	u64 scale, x;
+	u64 scale;
 	unsigned shift;
 
 	if (__USE_RTC()) {
 		/* 601 processor: dec counts down by 128 every 128ns */
 		ppc_tb_freq = 1000000000;
-		tb_last_jiffy = get_rtcl();
 	} else {
 		/* Normal PowerPC with timebase register */
 		ppc_md.calibrate_decr();
@@ -1030,49 +975,14 @@ void __init time_init(void)
 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 		printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-		tb_last_jiffy = get_tb();
 	}
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
 	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
-	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 	calc_cputime_factors();
 	setup_cputime_one_jiffy();
 
-	/*
-	 * Calculate the length of each tick in ns. It will not be
-	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
-	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
-	 * rounded up.
-	 */
-	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
-	do_div(x, ppc_tb_freq);
-	tick_nsec = x;
-	last_tick_len = x << TICKLEN_SCALE;
-
-	/*
-	 * Compute ticklen_to_xs, which is a factor which gets multiplied
-	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
-	 * It is computed as:
-	 *   ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
-	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
-	 * which turns out to be N = 51 - SHIFT_HZ.
-	 * This gives the result as a 0.64 fixed-point fraction.
-	 * That value is reduced by an offset amounting to 1 xsec per
-	 * 2^31 timebase ticks to avoid problems with time going backwards
-	 * by 1 xsec when we do timer_recalc_offset due to losing the
-	 * fractional xsec. That offset is equal to ppc_tb_freq/2^51
-	 * since there are 2^20 xsec in a second.
-	 */
-	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
-		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
-	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
-	ticklen_to_xs = res.result_low;
-
-	/* Compute tb_to_xs from tick_nsec */
-	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
-
 	/*
 	 * Compute scale factor for sched_clock.
	 * The calibrate_decr() function has set tb_ticks_per_sec,
@@ -1094,21 +1004,14 @@ void __init time_init(void)
 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
 	boot_tb = get_tb_or_rtc();
 
-	write_seqlock_irqsave(&xtime_lock, flags);
-
 	/* If platform provided a timezone (pmac), we correct the time */
 	if (timezone_offset) {
 		sys_tz.tz_minuteswest = -timezone_offset / 60;
 		sys_tz.tz_dsttime = 0;
 	}
 
-	vdso_data->tb_orig_stamp = tb_last_jiffy;
 	vdso_data->tb_update_count = 0;
 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-	vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
-	vdso_data->tb_to_xs = tb_to_xs;
-
-	write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	/* Start the decrementer on CPUs that have manual control
 	 * such as BookE
@@ -1202,39 +1105,6 @@ void to_tm(int tim, struct rtc_time * tm)
 	GregorianDay(tm);
 }
 
-/* Auxiliary function to compute scaling factors */
-/* Actually the choice of a timebase running at 1/4 the of the bus
- * frequency giving resolution of a few tens of nanoseconds is quite nice.
- * It makes this computation very precise (27-28 bits typically) which
- * is optimistic considering the stability of most processor clock
- * oscillators and the precision with which the timebase frequency
- * is measured but does not harm.
- */
-unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
-{
-	unsigned mlt=0, tmp, err;
-	/* No concern for performance, it's done once: use a stupid
-	 * but safe and compact method to find the multiplier.
-	 */
-
-	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
-		if (mulhwu(inscale, mlt|tmp) < outscale)
-			mlt |= tmp;
-	}
-
-	/* We might still be off by 1 for the best approximation.
-	 * A side effect of this is that if outscale is too large
-	 * the returned value will be zero.
-	 * Many corner cases have been checked and seem to work,
-	 * some might have been forgotten in the test however.
-	 */
-
-	err = inscale * (mlt+1);
-	if (err <= inscale/2)
-		mlt++;
-	return mlt;
-}
-
 /*
  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
  * result.
--
cgit v1.2.1
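A closing note on the sched_clock scale factor that the time_init()
hunk above keeps: the kernel derives scale and shift via div128_by_32,
but the end result is a plain multiply-and-shift conversion from
timebase ticks to nanoseconds. A minimal stand-alone sketch of that
conversion (assuming a hypothetical 512 MHz timebase and using the
GCC/Clang 128-bit integer extension for the intermediate product):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tb_ticks_per_sec = 512000000;	/* assumed timebase freq */
	unsigned shift = 32;
	/* scale = 1e9 * 2^shift / tb_ticks_per_sec, so that
	 * ns = (ticks * scale) >> shift; 1e9 << 32 still fits in 64 bits */
	uint64_t scale = (1000000000ULL << shift) / tb_ticks_per_sec;

	uint64_t ticks = 1024;	/* elapsed timebase ticks */
	/* 128-bit intermediate so ticks * scale cannot overflow */
	unsigned __int128 prod = (unsigned __int128)ticks * scale;
	uint64_t ns = (uint64_t)(prod >> shift);

	/* 1024 ticks at 512 MHz is exactly 2000 ns */
	printf("%llu ticks -> %llu ns\n",
	       (unsigned long long)ticks, (unsigned long long)ns);
	return 0;
}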