# HG changeset patch
# User maf46@xxxxxxxxxxxxxxxxx
# Node ID 581902ddd16f9ad9f3095a2e78bd212dd90d725e
# Parent 390e4d63cdb1de2a86c60cecdc4f82c5d69f3aca
# Parent 2ecb91fb6cdbdfa001a48bc2bb2b1db320a19485
Merged by hand.
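This changeset reworks local time calibration in the hypervisor and the
wallclock update in the guest kernel. In xen/arch/x86/time.c, the old scheme
of subtracting the accumulated local-time error from the elapsed-time
numerator is replaced by a separate fractional error_factor that is
multiplied into the calibration scale; the error is clamped to one
calibration period, now named EPOCH (1000ms), so the scale factor stays
>= 0.5. In the Linux sparse tree, update_wallclock() is rewritten to rebase
the wall clock in 64-bit microseconds from processed_system_time plus the
shadow time, instead of adjusting shadow_tv in place. Illustrative sketches
of the fixed-point helpers and of the new wallclock arithmetic follow each
file's diff below.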
diff -r 390e4d63cdb1 -r 581902ddd16f xen/arch/x86/time.c
--- a/xen/arch/x86/time.c Tue Jul 19 10:40:26 2005
+++ b/xen/arch/x86/time.c Tue Jul 19 12:20:24 2005
@@ -30,6 +30,8 @@
#include <asm/div64.h>
#include <io_ports.h>
+#define EPOCH MILLISECS(1000)
+
unsigned long cpu_khz; /* CPU clock frequency in kHz. */
spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
int timer_ack = 0;
@@ -80,8 +82,8 @@
}
/*
- * 32-bit multiplication of integer multiplicand and fractional multiplier
- * yielding 32-bit integer product.
+ * 32-bit multiplication of multiplicand and fractional multiplier
+ * yielding 32-bit product (radix point at same position as in multiplicand).
*/
static inline u32 mul_frac(u32 multiplicand, u32 multiplier)
{
@@ -461,14 +463,22 @@
/*
* System time and TSC ticks elapsed during the previous calibration
- * 'epoch'. Also the accumulated error in the local estimate. All these
- * values end up down-shifted to fit in 32 bits.
+ * 'epoch'. These values are down-shifted to fit in 32 bits.
*/
- u64 stime_elapsed64, tsc_elapsed64, local_stime_error64;
- u32 stime_elapsed32, tsc_elapsed32, local_stime_error32;
+ u64 stime_elapsed64, tsc_elapsed64;
+ u32 stime_elapsed32, tsc_elapsed32;
+
+ /* The accumulated error in the local estimate. */
+ u64 local_stime_err;
+
+ /* Error correction to slow down a fast local clock. */
+ u32 error_factor = 0;
/* Calculated TSC shift to ensure 32-bit scale multiplier. */
int tsc_shift = 0;
+
+ /* The overall calibration scale multiplier. */
+ u32 calibration_mul_frac;
prev_tsc = cpu_time[cpu].local_tsc_stamp;
prev_local_stime = cpu_time[cpu].stime_local_stamp;
@@ -497,13 +507,17 @@
tsc_elapsed64 = curr_tsc - prev_tsc;
/*
- * Error in the local system time estimate. Clamp to epoch time period, or
- * we could end up with a negative scale factor (time going backwards!).
- * This effectively clamps the scale factor to >= 0.
+ * Calculate error-correction factor. This only slows down a fast local
+ * clock (slow clocks are warped forwards). The scale factor is clamped
+ * to >= 0.5.
*/
- local_stime_error64 = curr_local_stime - curr_master_stime;
- if ( local_stime_error64 > stime_elapsed64 )
- local_stime_error64 = stime_elapsed64;
+ if ( curr_local_stime != curr_master_stime )
+ {
+ local_stime_err = curr_local_stime - curr_master_stime;
+ if ( local_stime_err > EPOCH )
+ local_stime_err = EPOCH;
+ error_factor = div_frac(EPOCH, EPOCH + (u32)local_stime_err);
+ }
/*
* We require 0 < stime_elapsed < 2^31.
@@ -513,14 +527,12 @@
while ( ((u32)stime_elapsed64 != stime_elapsed64) ||
((s32)stime_elapsed64 < 0) )
{
- stime_elapsed64 >>= 1;
- tsc_elapsed64 >>= 1;
- local_stime_error64 >>= 1;
- }
-
- /* stime_master_diff (and hence stime_error) now fit in a 32-bit word. */
- stime_elapsed32 = (u32)stime_elapsed64;
- local_stime_error32 = (u32)local_stime_error64;
+ stime_elapsed64 >>= 1;
+ tsc_elapsed64 >>= 1;
+ }
+
+ /* stime_elapsed now fits in a 32-bit word. */
+ stime_elapsed32 = (u32)stime_elapsed64;
/* tsc_elapsed <= 2*stime_elapsed */
while ( tsc_elapsed64 > (stime_elapsed32 * 2) )
@@ -541,21 +553,22 @@
tsc_shift++;
}
+ calibration_mul_frac = div_frac(stime_elapsed32, tsc_elapsed32);
+ if ( error_factor != 0 )
+ calibration_mul_frac = mul_frac(calibration_mul_frac, error_factor);
+
#if 0
- printk("---%d: %08x %d\n", cpu,
- div_frac(stime_elapsed32 - local_stime_error32, tsc_elapsed32),
- tsc_shift);
+ printk("---%d: %08x %d\n", cpu, calibration_mul_frac, tsc_shift);
#endif
/* Record new timestamp information. */
- cpu_time[cpu].tsc_scale.mul_frac =
- div_frac(stime_elapsed32 - local_stime_error32, tsc_elapsed32);
+ cpu_time[cpu].tsc_scale.mul_frac = calibration_mul_frac;
cpu_time[cpu].tsc_scale.shift = tsc_shift;
cpu_time[cpu].local_tsc_stamp = curr_tsc;
cpu_time[cpu].stime_local_stamp = curr_local_stime;
cpu_time[cpu].stime_master_stamp = curr_master_stime;
- set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + MILLISECS(1000));
+ set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);
if ( cpu == 0 )
platform_time_calibration();
@@ -577,7 +590,7 @@
init_ac_timer(&cpu_time[cpu].calibration_timer,
local_time_calibration, NULL, cpu);
- set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + MILLISECS(1000));
+ set_ac_timer(&cpu_time[cpu].calibration_timer, NOW() + EPOCH);
}
/* Late init function (after all CPUs are booted). */
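For reference, here is a small standalone sketch of the fixed-point
arithmetic the calibration now uses. It is an illustration only: the
div_frac()/mul_frac() below are portable u64 stand-ins assumed to match the
0.32 fixed-point semantics of Xen's inline-asm helpers, and the 10ms error
value is a made-up example.

    #include <stdint.h>
    #include <stdio.h>

    #define MILLISECS(ms) ((uint64_t)(ms) * 1000000ULL)   /* ns, as in Xen */
    #define EPOCH         MILLISECS(1000)

    /* (dividend / divisor) as a 0.32 fixed-point fraction (dividend < divisor). */
    static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
    {
        return (uint32_t)(((uint64_t)dividend << 32) / divisor);
    }

    /* multiplicand * 0.32 fixed-point multiplier; radix point unchanged. */
    static uint32_t mul_frac(uint32_t multiplicand, uint32_t multiplier)
    {
        return (uint32_t)(((uint64_t)multiplicand * multiplier) >> 32);
    }

    int main(void)
    {
        /* Hypothetical: the local clock ran 10ms fast over the 1s epoch. */
        uint64_t local_stime_err = MILLISECS(10);
        if (local_stime_err > EPOCH)
            local_stime_err = EPOCH;   /* clamp => error_factor >= 0.5 */

        /* EPOCH (1e9 ns) and EPOCH+err (<= 2e9) both fit in 32 bits. */
        uint32_t error_factor =
            div_frac((uint32_t)EPOCH, (uint32_t)(EPOCH + local_stime_err));

        /* Fold the correction into an example calibration multiplier (0.5). */
        uint32_t calibration_mul_frac = mul_frac(0x80000000u, error_factor);

        printf("error_factor=%08x calibration_mul_frac=%08x\n",
               (unsigned)error_factor, (unsigned)calibration_mul_frac);
        return 0;
    }

A fraction slightly below 1.0 slows a fast local clock; since the error is
clamped to EPOCH, error_factor never drops below 0.5.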
diff -r 390e4d63cdb1 -r 581902ddd16f linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c Tue Jul 19 10:40:26 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/time.c Tue Jul 19 12:20:24 2005
@@ -227,9 +227,9 @@
static void update_wallclock(void)
{
shared_info_t *s = HYPERVISOR_shared_info;
- long wtm_nsec;
- time_t wtm_sec, sec;
- s64 nsec;
+ long wtm_nsec, xtime_nsec;
+ time_t wtm_sec, xtime_sec;
+ u64 tmp, usec;
shadow_tv.tv_sec = s->wc_sec;
shadow_tv.tv_usec = s->wc_usec;
@@ -240,20 +240,22 @@
if ((time_status & STA_UNSYNC) != 0)
return;
- /* Adjust shadow for jiffies that haven't updated xtime yet. */
- shadow_tv.tv_usec -=
- (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
- HANDLE_USEC_UNDERFLOW(shadow_tv);
-
- /* Update our unsynchronised xtime appropriately. */
- sec = shadow_tv.tv_sec;
- nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
-
- __normalize_time(&sec, &nsec);
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
+ /* Adjust wall-clock time base based on wall_jiffies ticks. */
+ usec = processed_system_time;
+ do_div(usec, 1000);
+ usec += (u64)shadow_tv.tv_sec * 1000000ULL;
+ usec += (u64)shadow_tv.tv_usec;
+ usec -= (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
+
+ /* Split wallclock base into seconds and nanoseconds. */
+ tmp = usec;
+ xtime_nsec = do_div(tmp, 1000000) * 1000ULL;
+ xtime_sec = (time_t)tmp;
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
+
+ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
}
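Similarly, a minimal sketch of the new update_wallclock() arithmetic. The
do_div() below is a portable stand-in assumed to match the kernel macro
(divide the u64 in place, return the remainder; the statement-expression
form needs GCC/Clang), and all input values are hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* Stand-in for the kernel's do_div(n, base): divides n in place
     * and evaluates to the remainder. */
    #define do_div(n, base) ({                      \
        uint32_t __rem = (uint32_t)((n) % (base));  \
        (n) /= (base);                              \
        __rem; })

    int main(void)
    {
        /* All inputs are made-up example values. */
        u64 processed_system_time = 1234567890ULL;  /* ns since boot */
        u64 shadow_sec  = 1121770824ULL;            /* wc_sec from Xen */
        u64 shadow_usec = 500000ULL;                /* wc_usec from Xen */
        unsigned long jiffies = 100, wall_jiffies = 99;
        const unsigned long USEC_PER_SEC = 1000000, HZ = 100;

        /* Rebase the wall clock in microseconds, as in the patch. */
        u64 usec = processed_system_time;
        do_div(usec, 1000);                         /* ns -> us */
        usec += shadow_sec * 1000000ULL;
        usec += shadow_usec;
        usec -= (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);

        /* Split into seconds and nanoseconds for xtime. */
        u64 tmp = usec;
        long xtime_nsec = (long)(do_div(tmp, 1000000) * 1000ULL);
        long xtime_sec  = (long)tmp;

        printf("xtime = %ld s, %ld ns\n", xtime_sec, xtime_nsec);
        return 0;
    }

Working in 64-bit microseconds avoids the old underflow handling on
shadow_tv and keeps the jiffies adjustment in a single expression.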