# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1303735506 -3600
# Node ID e102c30f19c80e085890e471e68795ceb5ec15cf
# Parent dbbc61c48da49d205516623951d38baf9bab43f5
x86: don't write_tsc() non-zero values on CPUs updating only the lower 32 bits
This means suppressing the uses in time_calibration_tsc_rendezvous(),
cstate_restore_tsc(), and synchronize_tsc_slave(). It also fixes a boot
hang of Linux Dom0 when loading processor.ko on affected systems that
support C states above C1.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxx>
xen-unstable changeset: 23228:1329d99b4f16
xen-unstable date: Fri Apr 15 08:52:08 2011 +0100
---
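For reference, the detection added in tsc_check_writability() below boils down
to: write zero to the TSC, take a reading, then write back the saved value with
bit 32 set and take another reading; if the two readings differ by less than
2^31, the upper half of the write was lost. The standalone sketch below replays
that comparison in plain C. It only simulates the MSR behaviour: the sim_*
helpers and the fully_writable switch are invented for the illustration, and
real code would use rdtscll()/write_tsc() as in the patch itself.

/*
 * Simulation-only sketch: sim_tsc stands in for MSR_IA32_TSC, and the
 * fully_writable switch models whether TSC writes keep their upper 32 bits.
 */
#include <stdio.h>
#include <stdint.h>

#define ABS(x) ((x) < 0 ? -(x) : (x))   /* same spirit as Xen's ABS() */

static uint64_t sim_tsc;        /* simulated TSC value */
static int fully_writable;      /* 0 = writes update only the lower 32 bits */

static void sim_write_tsc(uint64_t val)
{
    /* On the affected CPUs the upper 32 bits of the written value are lost. */
    sim_tsc = fully_writable ? val : (uint64_t)(uint32_t)val;
}

static uint64_t sim_read_tsc(void)
{
    return sim_tsc += 1000;     /* the counter keeps ticking between reads */
}

/* Returns 1 if the (simulated) TSC turns out to be only partially writable. */
static int tsc_only_partially_writable(void)
{
    uint64_t saved = sim_read_tsc();
    uint64_t before, after;

    sim_write_tsc(0);                     /* cf. wrmsr_safe(MSR_IA32_TSC, 0, 0) */
    before = sim_read_tsc();
    sim_write_tsc(saved | (1ULL << 32));  /* try to set bit 32 */
    after = sim_read_tsc();

    /* If bit 32 stuck, the two readings are roughly 2^32 apart. */
    return ABS((int64_t)after - (int64_t)before) < (1LL << 31);
}

int main(void)
{
    fully_writable = 0;
    sim_tsc = 0x500000123ULL;
    printf("lower-32-only CPU: TSC %s writable\n",
           tsc_only_partially_writable() ? "only partially" : "fully");

    fully_writable = 1;
    sim_tsc = 0x500000123ULL;
    printf("normal CPU:        TSC %s writable\n",
           tsc_only_partially_writable() ? "only partially" : "fully");

    return 0;
}

Running the sketch prints "only partially" for the simulated lower-32-only CPU
and "fully" for the normal one, mirroring the
printk(XENLOG_WARNING "TSC %s writable\n", what) message added by the patch.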
diff -r dbbc61c48da4 -r e102c30f19c8 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c Wed Apr 13 09:48:17 2011 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c Mon Apr 25 13:45:06 2011 +0100
@@ -941,3 +941,7 @@
hpet_disable_legacy_broadcast();
}
+bool_t cpuidle_using_deep_cstate(void)
+{
+ return xen_cpuidle && max_cstate > (local_apic_timer_c2_ok ? 2 : 1);
+}
diff -r dbbc61c48da4 -r e102c30f19c8 xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c Wed Apr 13 09:48:17 2011 +0100
+++ b/xen/arch/x86/smpboot.c Mon Apr 25 13:45:06 2011 +0100
@@ -52,6 +52,7 @@
#include <asm/flushtlb.h>
#include <asm/msr.h>
#include <asm/mtrr.h>
+#include <asm/time.h>
#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
@@ -171,6 +172,12 @@
* then we print a warning if not, and always resync.
*/
+/*
+ * Since the TSC's upper 32 bits can't be written on earlier CPUs (before
+ * Prescott), there is no way to resync an AP against the BP.
+ */
+bool_t disable_tsc_sync;
+
static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
@@ -187,6 +194,9 @@
unsigned int one_usec;
int buggy = 0;
+ if ( disable_tsc_sync )
+ return;
+
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
printk("TSC is reliable, synchronization unnecessary\n");
return;
@@ -284,6 +294,9 @@
{
int i;
+ if ( disable_tsc_sync )
+ return;
+
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
return;
diff -r dbbc61c48da4 -r e102c30f19c8 xen/arch/x86/time.c
--- a/xen/arch/x86/time.c Wed Apr 13 09:48:17 2011 +0100
+++ b/xen/arch/x86/time.c Mon Apr 25 13:45:06 2011 +0100
@@ -21,6 +21,7 @@
#include <xen/smp.h>
#include <xen/irq.h>
#include <xen/softirq.h>
+#include <xen/cpuidle.h>
#include <xen/keyhandler.h>
#include <xen/guest_access.h>
#include <asm/io.h>
@@ -175,7 +176,6 @@
* cpu_mask that denotes the CPUs that needs timer interrupt coming in as
* IPIs in place of local APIC timers
*/
-extern int xen_cpuidle;
static cpumask_t pit_broadcast_mask;
static void smp_send_timer_broadcast_ipi(void)
@@ -724,6 +724,8 @@
new_tsc = t->local_tsc_stamp + scale_delta(stime_delta, &sys_to_tsc);
+ ASSERT(boot_cpu_has(X86_FEATURE_TSC_RELIABLE));
+
write_tsc(new_tsc);
}
@@ -1416,6 +1418,66 @@
}
}
+/*
+ * On certain older Intel CPUs writing the TSC MSR clears the upper 32 bits.
+ * Obviously we must not use write_tsc() on such CPUs.
+ *
+ * Additionally, AMD specifies that being able to write the TSC MSR is not an
+ * architectural feature (but, contrary to what their manual says, this also
+ * cannot be determined from CPUID bits).
+ */
+static void __init tsc_check_writability(void)
+{
+ const char *what = NULL;
+ uint64_t tsc;
+
+ /*
+ * If all CPUs are reported as synchronised and in sync, we never write
+ * the TSCs (except unavoidably, when a CPU is physically hot-plugged).
+ * Hence testing for writability is pointless and even harmful.
+ */
+ if ( boot_cpu_has(X86_FEATURE_TSC_RELIABLE) )
+ return;
+
+ rdtscll(tsc);
+ if ( wrmsr_safe(MSR_IA32_TSC, 0, 0) == 0 )
+ {
+ uint64_t tmp, tmp2;
+ rdtscll(tmp2);
+ write_tsc(tsc | (1ULL << 32));
+ rdtscll(tmp);
+ if ( ABS((s64)tmp - (s64)tmp2) < (1LL << 31) )
+ what = "only partially";
+ }
+ else
+ {
+ what = "not";
+ }
+
+ /* Nothing to do if the TSC is fully writable. */
+ if ( !what )
+ {
+ /*
+ * Paranoia - write back original TSC value. However, APs get synced
+ * with BSP as they are brought up, so this doesn't much matter.
+ */
+ write_tsc(tsc);
+ return;
+ }
+
+ printk(XENLOG_WARNING "TSC %s writable\n", what);
+
+ /* time_calibration_tsc_rendezvous() must not be used */
+ setup_clear_cpu_cap(X86_FEATURE_CONSTANT_TSC);
+
+ /* cstate_restore_tsc() must not be used (or do nothing) */
+ if ( !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) )
+ cpuidle_disable_deep_cstate();
+
+ /* synchronize_tsc_slave() must do nothing */
+ disable_tsc_sync = 1;
+}
+
/* Late init function (after all CPUs are booted). */
int __init init_xen_time(void)
{
@@ -1432,6 +1494,8 @@
setup_clear_cpu_cap(X86_FEATURE_TSC_RELIABLE);
}
+ tsc_check_writability();
+
/* If we have constant-rate TSCs then scale factor can be shared. */
if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
{
@@ -1486,7 +1550,7 @@
* XXX dom0 may rely on RTC interrupt delivery, so only enable
* hpet_broadcast if FSB mode available or if force_hpet_broadcast.
*/
- if ( xen_cpuidle && !boot_cpu_has(X86_FEATURE_ARAT) )
+ if ( cpuidle_using_deep_cstate() && !boot_cpu_has(X86_FEATURE_ARAT) )
{
hpet_broadcast_init();
if ( !hpet_broadcast_is_available() )
diff -r dbbc61c48da4 -r e102c30f19c8 xen/include/asm-x86/time.h
--- a/xen/include/asm-x86/time.h Wed Apr 13 09:48:17 2011 +0100
+++ b/xen/include/asm-x86/time.h Mon Apr 25 13:45:06 2011 +0100
@@ -27,6 +27,8 @@
typedef u64 cycles_t;
+extern bool_t disable_tsc_sync;
+
static inline cycles_t get_cycles(void)
{
cycles_t c;
diff -r dbbc61c48da4 -r e102c30f19c8 xen/include/xen/cpuidle.h
--- a/xen/include/xen/cpuidle.h Wed Apr 13 09:48:17 2011 +0100
+++ b/xen/include/xen/cpuidle.h Mon Apr 25 13:45:06 2011 +0100
@@ -83,7 +83,10 @@
void (*reflect) (struct acpi_processor_power *dev);
};
+extern s8 xen_cpuidle;
extern struct cpuidle_governor *cpuidle_current_governor;
+
+bool_t cpuidle_using_deep_cstate(void);
void cpuidle_disable_deep_cstate(void);
#define CPUIDLE_DRIVER_STATE_START 1