# HG changeset patch
# User Sheng Yang
# Date 1253088148 -28800
# Node ID 183fa85d47d9411e6e21c4534d8e231feeac96a7
# Parent 78e51528eba7c5fd328c90acd4edc58ba46b58e8
Enable event channel and QEmu device support for hybrid guest

Each of the 16 VIRQs from 24 to 39 is bound to a QEmu-emulated pin, so that
when a device asserts the pin, the corresponding VIRQ is delivered to the
guest instead.

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2797,6 +2797,8 @@
             update_domain_wallclock_time(d);
             d->hybrid_enabled |= XEN_HYBRID_TIMER_ENABLED;
         }
+        if (a.flags & HVM_HYBRID_EVTCHN)
+            d->hybrid_enabled |= XEN_HYBRID_EVTCHN_ENABLED;
     param_fail5:
         rcu_unlock_domain(d);
         break;
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -46,8 +46,18 @@
     if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
          (hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
     {
-        vioapic_irq_positive_edge(d, isa_irq);
-        vpic_irq_positive_edge(d, isa_irq);
+        if ( !is_hybrid_evtchn_enabled_domain(d) )
+        {
+            vioapic_irq_positive_edge(d, isa_irq);
+            vpic_irq_positive_edge(d, isa_irq);
+        }
+        else
+        {
+            /* TODO fix the critical region here */
+            spin_unlock(&d->arch.hvm_domain.irq_lock);
+            send_guest_global_virq(d, VIRQ_EMUL_PIN(isa_irq));
+            spin_lock(&d->arch.hvm_domain.irq_lock);
+        }
     }
 }
 
@@ -76,8 +86,10 @@
     link    = hvm_pci_intx_link(device, intx);
     isa_irq = hvm_irq->pci_link.route[link];
     if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
-         (--hvm_irq->gsi_assert_count[isa_irq] == 0) )
-        vpic_irq_negative_edge(d, isa_irq);
+         (--hvm_irq->gsi_assert_count[isa_irq] == 0) ) {
+        if ( !is_hybrid_evtchn_enabled_domain(d) )
+            vpic_irq_negative_edge(d, isa_irq);
+    }
 }
 
 void hvm_pci_intx_deassert(
@@ -93,6 +105,7 @@
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
     unsigned int gsi = hvm_isa_irq_to_gsi(isa_irq);
+    int send_virq = 0;
 
     ASSERT(isa_irq <= 15);
 
@@ -101,11 +114,21 @@
     if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (hvm_irq->gsi_assert_count[gsi]++ == 0) )
     {
-        vioapic_irq_positive_edge(d, gsi);
-        vpic_irq_positive_edge(d, isa_irq);
+        if ( !is_hybrid_evtchn_enabled_domain(d) )
+        {
+            vioapic_irq_positive_edge(d, gsi);
+            vpic_irq_positive_edge(d, isa_irq);
+        }
+        else
+        {
+            send_virq = 1;
+        }
     }
 
     spin_unlock(&d->arch.hvm_domain.irq_lock);
+
+    if (send_virq)
+        send_guest_global_virq(d, VIRQ_EMUL_PIN(isa_irq));
 }
 
 void hvm_isa_irq_deassert(
@@ -120,7 +143,10 @@
 
     if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
          (--hvm_irq->gsi_assert_count[gsi] == 0) )
-        vpic_irq_negative_edge(d, isa_irq);
+    {
+        if ( !is_hybrid_evtchn_enabled_domain(d) )
+            vpic_irq_negative_edge(d, isa_irq);
+    }
 
     spin_unlock(&d->arch.hvm_domain.irq_lock);
 }
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -159,7 +159,12 @@
 #define VIRQ_ARCH_6    22
 #define VIRQ_ARCH_7    23
 
-#define NR_VIRQS       24
+#define VIRQ_EMUL_PIN_START 24
+#define VIRQ_EMUL_PIN_END   39
+#define VIRQ_EMUL_PIN_NUM   16
+#define VIRQ_EMUL_PIN(x)    (VIRQ_EMUL_PIN_START + x)
+
+#define NR_VIRQS       40
 
 /*
  * MMU-UPDATE REQUESTS
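
For reference, below is a minimal guest-side sketch (not part of this
changeset) of how a hybrid guest kernel might consume one of the new
emulated-pin VIRQs, assuming the standard EVTCHNOP_bind_virq interface and a
Linux-style HYPERVISOR_event_channel_op hypercall wrapper; the
bind_emul_pin() helper name, header paths, and omission of error handling
are illustrative only.  The hypervisor-side counterpart is the
send_guest_global_virq() calls added in irq.c above.

/* Illustrative sketch only -- not part of this patch. */
#include <xen/interface/xen.h>            /* VIRQ_EMUL_PIN(), per this patch */
#include <xen/interface/event_channel.h>  /* struct evtchn_bind_virq */
#include <asm/xen/hypercall.h>            /* HYPERVISOR_event_channel_op() */

static int bind_emul_pin(unsigned int isa_irq)
{
    struct evtchn_bind_virq bind = {
        .virq = VIRQ_EMUL_PIN(isa_irq),   /* VIRQ 24 + isa_irq */
        .vcpu = 0,                        /* global VIRQs are delivered to VCPU 0 */
    };
    int rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind);

    /* On success, bind.port is the event channel the guest unmasks and
     * services from its event upcall instead of taking a PIC/IOAPIC IRQ. */
    return rc ? rc : (int)bind.port;
}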