I noticed when saving and restoring a 4-way domU that the CMC polling
mechanism was being triggered. This is actually a generic domU CPU
hotplug problem. We currently aren't setting the IRQ_PER_CPU status bit
in the IRQ descriptor structure. This causes migrate_irqs() to mark the
IRQ for migration and trigger it before taking the CPU offline. So
every time a CPU is taken offline, it first receives a CMC interrupt and
a CMC polling interrupt. If you have 4 or more CPUs, this is enough to
exceed the polling threshold and switch the CMC driver to polling mode.
I also removed some printks that now seem extraneous and switched to
using slightly more descriptive variables. Thanks,
Alex
Signed-off-by: Alex Williamson <alex.williamson@xxxxxx>
---
diff -r b4df7de0cbf7 linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c
--- a/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c Wed Jan 24 12:28:05
2007 -0700
+++ b/linux-2.6-xen-sparse/arch/ia64/kernel/irq_ia64.c Wed Jan 24 20:33:37
2007 -0700
@@ -303,81 +303,85 @@ static struct irqaction resched_irqactio
* required.
*/
static void
-xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
+xen_register_percpu_irq (unsigned int vec, struct irqaction *action, int save)
{
unsigned int cpu = smp_processor_id();
- int ret = 0;
+ irq_desc_t *desc;
+ int irq = 0;
if (xen_slab_ready) {
- switch (irq) {
+ switch (vec) {
case IA64_TIMER_VECTOR:
sprintf(timer_name[cpu], "%s%d", action->name, cpu);
- ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
+ irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
action->handler, action->flags,
timer_name[cpu], action->dev_id);
- per_cpu(timer_irq,cpu) = ret;
- printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq
(%d)\n", timer_name[cpu], ret);
+ per_cpu(timer_irq,cpu) = irq;
break;
case IA64_IPI_RESCHEDULE:
sprintf(resched_name[cpu], "%s%d", action->name, cpu);
- ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
+ irq = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
action->handler, action->flags,
resched_name[cpu], action->dev_id);
- per_cpu(resched_irq,cpu) = ret;
- printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to
xen irq (%d)\n", resched_name[cpu], ret);
+ per_cpu(resched_irq,cpu) = irq;
break;
case IA64_IPI_VECTOR:
sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
- ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
+ irq = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
action->handler, action->flags,
ipi_name[cpu], action->dev_id);
- per_cpu(ipi_irq,cpu) = ret;
- printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq
(%d)\n", ipi_name[cpu], ret);
- break;
- case IA64_SPURIOUS_INT_VECTOR:
+ per_cpu(ipi_irq,cpu) = irq;
break;
case IA64_CMC_VECTOR:
sprintf(cmc_name[cpu], "%s%d", action->name, cpu);
- ret = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
+ irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
action->handler,
action->flags,
cmc_name[cpu],
action->dev_id);
- per_cpu(cmc_irq,cpu) = ret;
- printk(KERN_INFO "register VIRQ_MCA_CMC (%s) to xen "
- "irq (%d)\n", cmc_name[cpu], ret);
+ per_cpu(cmc_irq,cpu) = irq;
break;
case IA64_CMCP_VECTOR:
sprintf(cmcp_name[cpu], "%s%d", action->name, cpu);
- ret = bind_ipi_to_irqhandler(CMCP_VECTOR, cpu,
+ irq = bind_ipi_to_irqhandler(CMCP_VECTOR, cpu,
action->handler,
action->flags,
cmcp_name[cpu],
action->dev_id);
- per_cpu(cmcp_irq,cpu) = ret;
- printk(KERN_INFO "register CMCP_VECTOR (%s) to xen "
- "irq (%d)\n", cmcp_name[cpu], ret);
+ per_cpu(cmcp_irq,cpu) = irq;
break;
case IA64_CPEP_VECTOR:
sprintf(cpep_name[cpu], "%s%d", action->name, cpu);
- ret = bind_ipi_to_irqhandler(CPEP_VECTOR, cpu,
+ irq = bind_ipi_to_irqhandler(CPEP_VECTOR, cpu,
action->handler,
action->flags,
cpep_name[cpu],
action->dev_id);
- per_cpu(cpep_irq,cpu) = ret;
- printk(KERN_INFO "register CPEP_VECTOR (%s) to xen "
- "irq (%d)\n", cpep_name[cpu], ret);
+ per_cpu(cpep_irq,cpu) = irq;
break;
case IA64_CPE_VECTOR:
- printk(KERN_WARNING "register IA64_CPE_VECTOR "
- "IGNORED\n");
+ case IA64_MCA_RENDEZ_VECTOR:
+ case IA64_PERFMON_VECTOR:
+ case IA64_MCA_WAKEUP_VECTOR:
+ case IA64_SPURIOUS_INT_VECTOR:
+ /* No need to complain, these aren't supported. */
break;
default:
- printk(KERN_WARNING "Percpu irq %d is unsupported by
xen!\n", irq);
- break;
- }
- BUG_ON(ret < 0);
+ printk(KERN_WARNING "Percpu irq %d is unsupported "
+ "by xen!\n", vec);
+ break;
+ }
+ BUG_ON(irq < 0);
+
+ if (irq > 0) {
+ /*
+ * Mark percpu. Without this, migrate_irqs() will
+ * mark the interrupt for migration and trigger it
+ * on cpu hotplug.
+ */
+ desc = irq_descp(irq);
+ desc->status |= IRQ_PER_CPU;
+ }
}
/* For BSP, we cache registered percpu irqs, and then re-walk
@@ -385,7 +389,7 @@ xen_register_percpu_irq (unsigned int ir
*/
if (!cpu && save) {
BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
- saved_percpu_irqs[saved_irq_cnt].irq = irq;
+ saved_percpu_irqs[saved_irq_cnt].irq = vec;
saved_percpu_irqs[saved_irq_cnt].action = action;
saved_irq_cnt++;
if (!xen_slab_ready)
@@ -572,7 +576,8 @@ ia64_send_ipi (int cpu, int vector, int
irq = per_cpu(ipi_to_irq, cpu)[CPEP_VECTOR];
break;
default:
- printk(KERN_WARNING"Unsupported IPI type 0x%x\n",
vector);
+ printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
+ vector);
irq = 0;
break;
}
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel
|