[Xen-changelog] [xen-unstable] C6 state with EOI issue fix for some Intel processors
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1284537635 -3600
# Node ID 1087f9a03ab61d3a8bb0a1c65e5b09f82f3a4277
# Parent 62edd2611cbbe4c50574b6f6f73dda2ae1136dde
C6 state with EOI issue fix for some Intel processors
There is an erratum in some Intel processors:
AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6
During an Interrupt Service Routine
If core C6 is entered after the start of an interrupt service routine
but before a write to the APIC EOI register, the core may not send an
EOI transaction (if needed) and further interrupts from the same
priority level or lower may be blocked.
This patch fixes the issue by checking whether an EOI is still pending in the
ISR before entering a deep Cx state. If one is pending, it uses
power->safe_state instead of the deep Cx state, preventing the problem
described above.
Signed-off-by: Sheng Yang <sheng@xxxxxxxxxxxxxxx>
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
xen/arch/x86/acpi/cpu_idle.c | 28 ++++++++++++++++++++++++++++
xen/arch/x86/irq.c | 5 +++++
xen/include/asm-x86/irq.h | 2 ++
3 files changed, 35 insertions(+)
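
In outline, the decision the patch adds just before entering a deep C-state is:
on an affected Nehalem/Westmere-class CPU with an EOI still outstanding, demote
the requested C3/C6 entry to the shallower safe state. The standalone sketch
below mirrors that logic for illustration only; the names and constants in it
(cpu_info, errata_c6_eoi_applies, choose_cstate) are simplified stand-ins, not
the Xen internals used in the real diff.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the Xen definitions referenced by the patch. */
#define ACPI_STATE_C1 1
#define ACPI_STATE_C3 3

struct cpu_info {
    bool vendor_is_intel;    /* boot_cpu_data.x86_vendor == X86_VENDOR_INTEL */
    unsigned int family;     /* boot_cpu_data.x86 */
    unsigned int model;      /* boot_cpu_data.x86_model */
};

/* Is this one of the models covered by erratum AAJ72? */
static bool errata_c6_eoi_applies(const struct cpu_info *c)
{
    if ( !c->vendor_is_intel || c->family != 6 )
        return false;
    switch ( c->model )
    {
    case 0x1a: case 0x1e: case 0x1f:
    case 0x25: case 0x2c: case 0x2f:
        return true;
    default:
        return false;
    }
}

/* Pick the C-state to enter: demote a deep state to the safe state when an
 * EOI is still outstanding on an affected CPU. */
static int choose_cstate(const struct cpu_info *c, int requested_state,
                         int safe_state, bool eoi_pending)
{
    if ( requested_state == ACPI_STATE_C3 &&
         errata_c6_eoi_applies(c) && eoi_pending )
        return safe_state;
    return requested_state;
}

int main(void)
{
    struct cpu_info nehalem = { .vendor_is_intel = true, .family = 6, .model = 0x1a };

    printf("EOI pending -> C%d\n",
           choose_cstate(&nehalem, ACPI_STATE_C3, ACPI_STATE_C1, true));
    printf("no EOI      -> C%d\n",
           choose_cstate(&nehalem, ACPI_STATE_C3, ACPI_STATE_C1, false));
    return 0;
}
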
diff -r 62edd2611cbb -r 1087f9a03ab6 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c Wed Sep 15 08:18:53 2010 +0100
+++ b/xen/arch/x86/acpi/cpu_idle.c Wed Sep 15 09:00:35 2010 +0100
@@ -351,6 +351,31 @@ static int sched_has_urgent_vcpu(void)
return atomic_read(&this_cpu(schedule_data).urgent_count);
}
+/*
+ * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During
+ * an Interrupt Service Routine"
+ *
+ * There was an errata with some Core i7 processors that an EOI transaction
+ * may not be sent if software enters core C6 during an interrupt service
+ * routine. So we don't enter deep Cx state if there is an EOI pending.
+ */
+bool_t errata_c6_eoi_workaround(void)
+{
+ static bool_t fix_needed = -1;
+
+ if ( unlikely(fix_needed == -1) )
+ {
+ int model = boot_cpu_data.x86_model;
+ fix_needed = (cpu_has_apic && !directed_eoi_enabled &&
+ (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+ (boot_cpu_data.x86 == 6) &&
+ ((model == 0x1a) || (model == 0x1e) || (model == 0x1f) ||
+ (model == 0x25) || (model == 0x2c) || (model == 0x2f)));
+ }
+
+ return (fix_needed && cpu_has_pending_apic_eoi());
+}
+
static void acpi_processor_idle(void)
{
struct acpi_processor_power *power = processor_powers[smp_processor_id()];
@@ -400,6 +425,9 @@ static void acpi_processor_idle(void)
cpufreq_dbs_timer_resume();
return;
}
+
+ if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
+ cx = power->safe_state;
power->last_state = cx;
diff -r 62edd2611cbb -r 1087f9a03ab6 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Wed Sep 15 08:18:53 2010 +0100
+++ b/xen/arch/x86/irq.c Wed Sep 15 09:00:35 2010 +0100
@@ -765,6 +765,11 @@ static DEFINE_PER_CPU(struct pending_eoi
static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_DYNAMIC_VECTORS]);
#define pending_eoi_sp(p) ((p)[NR_DYNAMIC_VECTORS-1].vector)
+bool_t cpu_has_pending_apic_eoi(void)
+{
+ return (pending_eoi_sp(this_cpu(pending_eoi)) != 0);
+}
+
static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
{
if ( d->arch.pirq_eoi_map )
diff -r 62edd2611cbb -r 1087f9a03ab6 xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Wed Sep 15 08:18:53 2010 +0100
+++ b/xen/include/asm-x86/irq.h Wed Sep 15 09:00:35 2010 +0100
@@ -147,4 +147,6 @@ void irq_set_affinity(struct irq_desc *,
#define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
#define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
+bool_t cpu_has_pending_apic_eoi(void);
+
#endif /* _ASM_HW_IRQ_H */