# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1225896694 0
# Node ID 65ef40020d28e74177a0c21023509007097726a2
# Parent 509d67fe5120bf8ff19e1b2df21a4c5472292db2
Revert 720:509d67fe5120b. Breaks the build.
---
arch/i386/kernel/io_apic-xen.c   |    3
arch/x86_64/kernel/io_apic-xen.c |    3
drivers/xen/core/evtchn.c        |  583 +++++++++++++++++++--------------------
3 files changed, 293 insertions(+), 296 deletions(-)
diff -r 509d67fe5120 -r 65ef40020d28 arch/i386/kernel/io_apic-xen.c
--- a/arch/i386/kernel/io_apic-xen.c Wed Nov 05 14:45:34 2008 +0000
+++ b/arch/i386/kernel/io_apic-xen.c Wed Nov 05 14:51:34 2008 +0000
@@ -1216,9 +1216,6 @@ int assign_irq_vector(int irq)
BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
- if (irq < PIRQ_BASE || irq - PIRQ_BASE > NR_PIRQS)
- return -EINVAL;
-
spin_lock_irqsave(&vector_lock, flags);
if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
diff -r 509d67fe5120 -r 65ef40020d28 arch/x86_64/kernel/io_apic-xen.c
--- a/arch/x86_64/kernel/io_apic-xen.c Wed Nov 05 14:45:34 2008 +0000
+++ b/arch/x86_64/kernel/io_apic-xen.c Wed Nov 05 14:51:34 2008 +0000
@@ -895,9 +895,6 @@ int assign_irq_vector(int irq)
BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
- if (irq < PIRQ_BASE || irq - PIRQ_BASE > NR_PIRQS)
- return -EINVAL;
-
spin_lock_irqsave(&vector_lock, flags);
if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
diff -r 509d67fe5120 -r 65ef40020d28 drivers/xen/core/evtchn.c
--- a/drivers/xen/core/evtchn.c Wed Nov 05 14:45:34 2008 +0000
+++ b/drivers/xen/core/evtchn.c Wed Nov 05 14:51:34 2008 +0000
@@ -756,281 +756,18 @@ static struct hw_interrupt_type dynirq_t
.retrigger = resend_irq_on_evtchn,
};
-static inline void pirq_unmask_notify(int irq)
-{
- struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
- if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
- VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
-}
-
-static inline void pirq_query_unmask(int irq)
-{
- struct physdev_irq_status_query irq_status;
- irq_status.irq = evtchn_get_xen_pirq(irq);
- if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
- irq_status.flags = 0;
- clear_bit(irq - PIRQ_BASE, pirq_needs_eoi);
- if (irq_status.flags & XENIRQSTAT_needs_eoi)
- set_bit(irq - PIRQ_BASE, pirq_needs_eoi);
-}
-
-/*
- * On startup, if there is no action associated with the IRQ then we are
- * probing. In this case we should not share with others as it will confuse us.
- */
-#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
-
-static unsigned int startup_pirq(unsigned int irq)
-{
- struct evtchn_bind_pirq bind_pirq;
- int evtchn = evtchn_from_irq(irq);
-
- if (VALID_EVTCHN(evtchn))
- goto out;
-
- bind_pirq.pirq = evtchn_get_xen_pirq(irq);
- /* NB. We are happy to share unless we are probing. */
- bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
- if (!probing_irq(irq))
- printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
- irq);
- return 0;
- }
- evtchn = bind_pirq.port;
-
- pirq_query_unmask(irq);
-
- evtchn_to_irq[evtchn] = irq;
- bind_evtchn_to_cpu(evtchn, 0);
- irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
-
- out:
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq);
-
- return 0;
-}
-
-static void shutdown_pirq(unsigned int irq)
-{
- struct evtchn_close close;
- int evtchn = evtchn_from_irq(irq);
-
- if (!VALID_EVTCHN(evtchn))
- return;
-
- mask_evtchn(evtchn);
-
- close.port = evtchn;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
- BUG();
-
- bind_evtchn_to_cpu(evtchn, 0);
- evtchn_to_irq[evtchn] = -1;
- irq_info[irq] = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0);
-}
-
-static void enable_pirq(unsigned int irq)
-{
- startup_pirq(irq);
-}
-
-static void disable_pirq(unsigned int irq)
-{
-}
-
-static void ack_pirq(unsigned int irq)
-{
- int evtchn = evtchn_from_irq(irq);
-
- move_native_irq(irq);
-
- if (VALID_EVTCHN(evtchn)) {
- mask_evtchn(evtchn);
- clear_evtchn(evtchn);
- }
-}
-
-static void end_pirq(unsigned int irq)
-{
- int evtchn = evtchn_from_irq(irq);
-
- if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
- (IRQ_DISABLED|IRQ_PENDING)) {
- shutdown_pirq(irq);
- } else if (VALID_EVTCHN(evtchn)) {
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq);
- }
-}
-
-static struct hw_interrupt_type pirq_type = {
- .typename = "Phys-irq",
- .startup = startup_pirq,
- .shutdown = shutdown_pirq,
- .enable = enable_pirq,
- .disable = disable_pirq,
- .ack = ack_pirq,
- .end = end_pirq,
-#ifdef CONFIG_SMP
- .set_affinity = set_affinity_irq,
-#endif
- .retrigger = resend_irq_on_evtchn,
-};
-
-int irq_ignore_unhandled(unsigned int irq)
-{
- struct physdev_irq_status_query irq_status = { .irq = irq };
-
- if (!is_running_on_xen())
- return 0;
-
- if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
- return 0;
- return !!(irq_status.flags & XENIRQSTAT_shared);
-}
-
-void notify_remote_via_irq(int irq)
-{
- int evtchn = evtchn_from_irq(irq);
-
- if (VALID_EVTCHN(evtchn))
- notify_remote_via_evtchn(evtchn);
-}
-EXPORT_SYMBOL_GPL(notify_remote_via_irq);
-
-int irq_to_evtchn_port(int irq)
-{
- return evtchn_from_irq(irq);
-}
-EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
-
-void mask_evtchn(int port)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
- synch_set_bit(port, s->evtchn_mask);
-}
-EXPORT_SYMBOL_GPL(mask_evtchn);
-
-void unmask_evtchn(int port)
-{
- shared_info_t *s = HYPERVISOR_shared_info;
- unsigned int cpu = smp_processor_id();
- vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
-
- BUG_ON(!irqs_disabled());
-
- /* Slow path (hypercall) if this is a non-local port. */
- if (unlikely(cpu != cpu_from_evtchn(port))) {
- struct evtchn_unmask unmask = { .port = port };
- VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
- return;
- }
-
- synch_clear_bit(port, s->evtchn_mask);
-
- /* Did we miss an interrupt 'edge'? Re-fire if so. */
- if (synch_test_bit(port, s->evtchn_pending) &&
- !synch_test_and_set_bit(port / BITS_PER_LONG,
- &vcpu_info->evtchn_pending_sel))
- vcpu_info->evtchn_upcall_pending = 1;
-}
-EXPORT_SYMBOL_GPL(unmask_evtchn);
-
-void disable_all_local_evtchn(void)
-{
- unsigned i, cpu = smp_processor_id();
- shared_info_t *s = HYPERVISOR_shared_info;
-
- for (i = 0; i < NR_EVENT_CHANNELS; ++i)
- if (cpu_from_evtchn(i) == cpu)
- synch_set_bit(i, &s->evtchn_mask[0]);
-}
-
-static void restore_cpu_virqs(unsigned int cpu)
-{
- struct evtchn_bind_virq bind_virq;
- int virq, irq, evtchn;
-
- for (virq = 0; virq < NR_VIRQS; virq++) {
- if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
- continue;
-
- BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
-
- /* Get a new binding from Xen. */
- bind_virq.virq = virq;
- bind_virq.vcpu = cpu;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
- &bind_virq) != 0)
- BUG();
- evtchn = bind_virq.port;
-
- /* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
- bind_evtchn_to_cpu(evtchn, cpu);
-
- /* Ready for use. */
- unmask_evtchn(evtchn);
- }
-}
-
-static void restore_cpu_ipis(unsigned int cpu)
-{
- struct evtchn_bind_ipi bind_ipi;
- int ipi, irq, evtchn;
-
- for (ipi = 0; ipi < NR_IPIS; ipi++) {
- if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
- continue;
-
- BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
-
- /* Get a new binding from Xen. */
- bind_ipi.vcpu = cpu;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
- &bind_ipi) != 0)
- BUG();
- evtchn = bind_ipi.port;
-
- /* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
- bind_evtchn_to_cpu(evtchn, cpu);
-
- /* Ready for use. */
- unmask_evtchn(evtchn);
-
- }
-}
-
-void irq_resume(void)
-{
- unsigned int cpu, irq, evtchn;
-
- init_evtchn_cpu_bindings();
-
- /* New event-channel space is not 'live' yet. */
- for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
- mask_evtchn(evtchn);
-
- /* Check that no PIRQs are still bound. */
- for (irq = PIRQ_BASE; irq < (PIRQ_BASE + NR_PIRQS); irq++)
- BUG_ON(irq_info[irq] != IRQ_UNBOUND);
-
- /* No IRQ <-> event-channel mappings. */
- for (irq = 0; irq < NR_IRQS; irq++)
- irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
- for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
- evtchn_to_irq[evtchn] = -1;
-
- for_each_possible_cpu(cpu) {
- restore_cpu_virqs(cpu);
- restore_cpu_ipis(cpu);
- }
-
+void evtchn_register_pirq(int irq)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0);
+
+ /* Cannot call set_irq_probe(), as that's marked __init. */
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock, flags);
+ desc->status &= ~IRQ_NOPROBE;
+ spin_unlock_irqrestore(&desc->lock, flags);
}
#if defined(CONFIG_X86_IO_APIC)
@@ -1040,15 +777,6 @@ void irq_resume(void)
#else
#define identity_mapped_irq(irq) (1)
#endif
-
-void evtchn_register_pirq(int irq)
-{
- BUG_ON(irq < PIRQ_BASE || irq - PIRQ_BASE > NR_PIRQS);
- if (identity_mapped_irq(irq))
- return;
- irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0);
- set_irq_chip_and_handler_name(irq, &pirq_chip, handle_level_irq,
"level");
-}
int evtchn_map_pirq(int irq, int xen_pirq)
{
@@ -1070,11 +798,9 @@ int evtchn_map_pirq(int irq, int xen_pir
spin_unlock(&irq_alloc_lock);
if (irq < PIRQ_BASE)
return -ENOSPC;
- irq_desc[irq].chip = &pirq_type;
} else if (!xen_pirq) {
if (unlikely(type_from_irq(irq) != IRQT_PIRQ))
return -EINVAL;
- irq_desc[irq].chip = &no_irq_type;
irq_info[irq] = IRQ_UNBOUND;
return 0;
} else if (type_from_irq(irq) != IRQT_PIRQ
@@ -1095,6 +821,283 @@ int evtchn_get_xen_pirq(int irq)
return index_from_irq(irq);
}
+static inline void pirq_unmask_notify(int irq)
+{
+ struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
+ if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
+ VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
+}
+
+static inline void pirq_query_unmask(int irq)
+{
+ struct physdev_irq_status_query irq_status;
+ irq_status.irq = evtchn_get_xen_pirq(irq);
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
+ irq_status.flags = 0;
+ clear_bit(irq - PIRQ_BASE, pirq_needs_eoi);
+ if (irq_status.flags & XENIRQSTAT_needs_eoi)
+ set_bit(irq - PIRQ_BASE, pirq_needs_eoi);
+}
+
+/*
+ * On startup, if there is no action associated with the IRQ then we are
+ * probing. In this case we should not share with others as it will confuse us.
+ */
+#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
+
+static unsigned int startup_pirq(unsigned int irq)
+{
+ struct evtchn_bind_pirq bind_pirq;
+ int evtchn = evtchn_from_irq(irq);
+
+ if (VALID_EVTCHN(evtchn))
+ goto out;
+
+ bind_pirq.pirq = evtchn_get_xen_pirq(irq);
+ /* NB. We are happy to share unless we are probing. */
+ bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
+ if (!probing_irq(irq))
+ printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
+ irq);
+ return 0;
+ }
+ evtchn = bind_pirq.port;
+
+ pirq_query_unmask(irq);
+
+ evtchn_to_irq[evtchn] = irq;
+ bind_evtchn_to_cpu(evtchn, 0);
+ irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
+
+ out:
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq);
+
+ return 0;
+}
+
+static void shutdown_pirq(unsigned int irq)
+{
+ struct evtchn_close close;
+ int evtchn = evtchn_from_irq(irq);
+
+ if (!VALID_EVTCHN(evtchn))
+ return;
+
+ mask_evtchn(evtchn);
+
+ close.port = evtchn;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+
+ bind_evtchn_to_cpu(evtchn, 0);
+ evtchn_to_irq[evtchn] = -1;
+ irq_info[irq] = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0);
+}
+
+static void enable_pirq(unsigned int irq)
+{
+ startup_pirq(irq);
+}
+
+static void disable_pirq(unsigned int irq)
+{
+}
+
+static void ack_pirq(unsigned int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+
+ move_native_irq(irq);
+
+ if (VALID_EVTCHN(evtchn)) {
+ mask_evtchn(evtchn);
+ clear_evtchn(evtchn);
+ }
+}
+
+static void end_pirq(unsigned int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+
+ if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
+ (IRQ_DISABLED|IRQ_PENDING)) {
+ shutdown_pirq(irq);
+ } else if (VALID_EVTCHN(evtchn)) {
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq);
+ }
+}
+
+static struct hw_interrupt_type pirq_type = {
+ .typename = "Phys-irq",
+ .startup = startup_pirq,
+ .shutdown = shutdown_pirq,
+ .enable = enable_pirq,
+ .disable = disable_pirq,
+ .ack = ack_pirq,
+ .end = end_pirq,
+#ifdef CONFIG_SMP
+ .set_affinity = set_affinity_irq,
+#endif
+ .retrigger = resend_irq_on_evtchn,
+};
+
+int irq_ignore_unhandled(unsigned int irq)
+{
+ struct physdev_irq_status_query irq_status = { .irq = irq };
+
+ if (!is_running_on_xen())
+ return 0;
+
+ if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
+ return 0;
+ return !!(irq_status.flags & XENIRQSTAT_shared);
+}
+
+void notify_remote_via_irq(int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+
+ if (VALID_EVTCHN(evtchn))
+ notify_remote_via_evtchn(evtchn);
+}
+EXPORT_SYMBOL_GPL(notify_remote_via_irq);
+
+int irq_to_evtchn_port(int irq)
+{
+ return evtchn_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
+
+void mask_evtchn(int port)
+{
+ shared_info_t *s = HYPERVISOR_shared_info;
+ synch_set_bit(port, s->evtchn_mask);
+}
+EXPORT_SYMBOL_GPL(mask_evtchn);
+
+void unmask_evtchn(int port)
+{
+ shared_info_t *s = HYPERVISOR_shared_info;
+ unsigned int cpu = smp_processor_id();
+ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
+
+ BUG_ON(!irqs_disabled());
+
+ /* Slow path (hypercall) if this is a non-local port. */
+ if (unlikely(cpu != cpu_from_evtchn(port))) {
+ struct evtchn_unmask unmask = { .port = port };
+ VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
+ return;
+ }
+
+ synch_clear_bit(port, s->evtchn_mask);
+
+ /* Did we miss an interrupt 'edge'? Re-fire if so. */
+ if (synch_test_bit(port, s->evtchn_pending) &&
+ !synch_test_and_set_bit(port / BITS_PER_LONG,
+ &vcpu_info->evtchn_pending_sel))
+ vcpu_info->evtchn_upcall_pending = 1;
+}
+EXPORT_SYMBOL_GPL(unmask_evtchn);
+
+void disable_all_local_evtchn(void)
+{
+ unsigned i, cpu = smp_processor_id();
+ shared_info_t *s = HYPERVISOR_shared_info;
+
+ for (i = 0; i < NR_EVENT_CHANNELS; ++i)
+ if (cpu_from_evtchn(i) == cpu)
+ synch_set_bit(i, &s->evtchn_mask[0]);
+}
+
+static void restore_cpu_virqs(unsigned int cpu)
+{
+ struct evtchn_bind_virq bind_virq;
+ int virq, irq, evtchn;
+
+ for (virq = 0; virq < NR_VIRQS; virq++) {
+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+ continue;
+
+ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
+
+ /* Get a new binding from Xen. */
+ bind_virq.virq = virq;
+ bind_virq.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq) != 0)
+ BUG();
+ evtchn = bind_virq.port;
+
+ /* Record the new mapping. */
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+ bind_evtchn_to_cpu(evtchn, cpu);
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+ }
+}
+
+static void restore_cpu_ipis(unsigned int cpu)
+{
+ struct evtchn_bind_ipi bind_ipi;
+ int ipi, irq, evtchn;
+
+ for (ipi = 0; ipi < NR_IPIS; ipi++) {
+ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+ continue;
+
+ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
+
+ /* Get a new binding from Xen. */
+ bind_ipi.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+ &bind_ipi) != 0)
+ BUG();
+ evtchn = bind_ipi.port;
+
+ /* Record the new mapping. */
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+ bind_evtchn_to_cpu(evtchn, cpu);
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+
+ }
+}
+
+void irq_resume(void)
+{
+ unsigned int cpu, irq, evtchn;
+
+ init_evtchn_cpu_bindings();
+
+ /* New event-channel space is not 'live' yet. */
+ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+ mask_evtchn(evtchn);
+
+ /* Check that no PIRQs are still bound. */
+ for (irq = PIRQ_BASE; irq < (PIRQ_BASE + NR_PIRQS); irq++)
+ BUG_ON(irq_info[irq] != IRQ_UNBOUND);
+
+ /* No IRQ <-> event-channel mappings. */
+ for (irq = 0; irq < NR_IRQS; irq++)
+ irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
+ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+ evtchn_to_irq[evtchn] = -1;
+
+ for_each_possible_cpu(cpu) {
+ restore_cpu_virqs(cpu);
+ restore_cpu_ipis(cpu);
+ }
+
+}
+
void __init xen_init_IRQ(void)
{
unsigned int i;
@@ -1123,16 +1126,16 @@ void __init xen_init_IRQ(void)
for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_PIRQS); i++) {
irq_bindcount[i] = 1;
- if (!identity_mapped_irq(i))
- continue;
-
#ifdef RTC_IRQ
/* If not domain 0, force our RTC driver to fail its probe. */
- if (i - PIRQ_BASE == RTC_IRQ && !is_initial_xendomain())
+ if (identity_mapped_irq(i) && ((i - PIRQ_BASE) == RTC_IRQ)
+ && !is_initial_xendomain())
continue;
#endif
irq_desc[i].status = IRQ_DISABLED;
+ if (!identity_mapped_irq(i))
+ irq_desc[i].status |= IRQ_NOPROBE;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &pirq_type;