Hi all,
this patch is for Xen and is derived from an earlier patch by Sheng.
The changes introduced by this patch are:
- support for some VCPUOP and PHYSDEVOP hypercalls in HVM domains;
- a new interrupt callback method for HVM domains, based on injecting a
vector directly rather than asserting a GSI or a PCI INTx line;
- the ability for HVM guests to map emulated interrupts to pirqs;
the last point in particular is new and is not present in the
corresponding patch from Sheng.
Unlike that patch, this one doesn't introduce an "enable_pv" hypercall;
it also resets the TSC offset when the guest binds VIRQ_TIMER.
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Signed-off-by: Sheng Yang <sheng@xxxxxxxxxxxxxxx>
---
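For reviewers, a rough, untested sketch of the guest-side sequence this
patch enables; hypercall_hvm_op, hypercall_physdev_op and
hypercall_event_channel_op are hypothetical stand-ins for the guest
kernel's hypercall stubs, and the public header paths vary by guest
environment:

#include <stdint.h>
#include <xen/xen.h>             /* DOMID_SELF */
#include <xen/hvm/hvm_op.h>      /* HVMOP_set_param, struct xen_hvm_param */
#include <xen/hvm/params.h>      /* HVM_PARAM_CALLBACK_IRQ */
#include <xen/physdev.h>         /* PHYSDEVOP_map_pirq, struct physdev_map_pirq */
#include <xen/event_channel.h>   /* EVTCHNOP_bind_pirq, struct evtchn_bind_pirq */

/* Hypothetical hypercall stubs, assumed to exist in the guest kernel. */
extern int hypercall_hvm_op(unsigned int op, void *arg);
extern int hypercall_physdev_op(unsigned int op, void *arg);
extern int hypercall_event_channel_op(unsigned int op, void *arg);

/* Type 2 in bits 63:56 decodes to HVMIRQ_callback_vector in
 * hvm_set_callback_via() below (via_type = (via >> 56) + 1). */
#define HVM_CALLBACK_VECTOR(x)   ((2ULL << 56) | (x))

static int setup_pv_irq(uint8_t vector, int gsi)
{
    struct xen_hvm_param cb = {
        .domid = DOMID_SELF,
        .index = HVM_PARAM_CALLBACK_IRQ,
        .value = HVM_CALLBACK_VECTOR(vector),
    };
    struct physdev_map_pirq map = {
        .domid = DOMID_SELF,
        .type  = MAP_PIRQ_TYPE_GSI,
        .index = gsi,    /* emulated GSI ...           */
        .pirq  = gsi,    /* ... delivered on this pirq */
    };
    struct evtchn_bind_pirq bind = { .pirq = gsi };
    int rc;

    /* 1. Deliver event channel upcalls by injecting "vector". */
    if ( (rc = hypercall_hvm_op(HVMOP_set_param, &cb)) != 0 )
        return rc;
    /* 2. Map the emulated GSI to a pirq (new in this patch). */
    if ( (rc = hypercall_physdev_op(PHYSDEVOP_map_pirq, &map)) != 0 )
        return rc;
    /* 3. Bind the pirq to an event channel, as a PV guest would. */
    return hypercall_event_channel_op(EVTCHNOP_bind_pirq, &bind);
}
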
diff -r 3bb163b74673 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/arch/x86/domain.c Tue Mar 02 17:20:42 2010 +0000
@@ -495,6 +495,16 @@
for ( i = 1; platform_legacy_irq(i); ++i )
if ( !IO_APIC_IRQ(i) )
d->arch.irq_pirq[i] = d->arch.pirq_irq[i] = i;
+
+ /* pirq_emuirq is indexed by pirq, emuirq_pirq by emulated IRQ number. */
+ d->arch.pirq_emuirq = xmalloc_array(int, d->nr_pirqs);
+ d->arch.emuirq_pirq = xmalloc_array(int, nr_irqs);
+ if ( !d->arch.pirq_emuirq || !d->arch.emuirq_pirq )
+ goto fail;
+ memset(d->arch.pirq_emuirq, -1,
+ d->nr_pirqs * sizeof(*d->arch.pirq_emuirq));
+ memset(d->arch.emuirq_pirq, -1,
+ nr_irqs * sizeof(*d->arch.emuirq_pirq));
+
if ( (rc = iommu_domain_init(d)) != 0 )
goto fail;
diff -r 3bb163b74673 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/arch/x86/hvm/hvm.c Tue Mar 02 17:20:42 2010 +0000
@@ -2231,6 +2231,21 @@
return rc;
}
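+/* White-list of the physdev ops usable by HVM guests; others get -ENOSYS. */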
+static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+ switch ( cmd )
+ {
+ case PHYSDEVOP_setup_gsi:
+ case PHYSDEVOP_map_pirq:
+ case PHYSDEVOP_unmap_pirq:
+ case PHYSDEVOP_eoi:
+ case PHYSDEVOP_irq_status_query:
+ return do_physdev_op(cmd, arg);
+ default:
+ return -ENOSYS;
+ }
+}
+
static long hvm_vcpu_op(
int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
@@ -2240,6 +2255,14 @@
{
case VCPUOP_register_runstate_memory_area:
case VCPUOP_get_runstate_info:
+ case VCPUOP_initialise:
+ case VCPUOP_up:
+ case VCPUOP_is_up:
+ case VCPUOP_set_periodic_timer:
+ case VCPUOP_stop_periodic_timer:
+ case VCPUOP_set_singleshot_timer:
+ case VCPUOP_stop_singleshot_timer:
+ case VCPUOP_register_vcpu_time_memory_area:
rc = do_vcpu_op(cmd, vcpuid, arg);
break;
default:
@@ -2262,9 +2285,11 @@
[ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
[ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
+ [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
+ HYPERCALL(set_timer_op),
HYPERCALL(hvm_op)
};
@@ -2296,6 +2321,14 @@
{
case VCPUOP_register_runstate_memory_area:
case VCPUOP_get_runstate_info:
+ case VCPUOP_initialise:
+ case VCPUOP_up:
+ case VCPUOP_is_up:
+ case VCPUOP_set_periodic_timer:
+ case VCPUOP_stop_periodic_timer:
+ case VCPUOP_set_singleshot_timer:
+ case VCPUOP_stop_singleshot_timer:
+ case VCPUOP_register_vcpu_time_memory_area:
rc = compat_vcpu_op(cmd, vcpuid, arg);
break;
default:
@@ -2306,13 +2339,33 @@
return rc;
}
+static long hvm_physdev_op_compat32(
+ int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+ switch ( cmd )
+ {
+ case PHYSDEVOP_setup_gsi:
+ case PHYSDEVOP_map_pirq:
+ case PHYSDEVOP_unmap_pirq:
+ case PHYSDEVOP_eoi:
+ case PHYSDEVOP_irq_status_query:
+ return compat_physdev_op(cmd, arg);
+ default:
+ return -ENOSYS;
+ }
+}
+
static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
[ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
[ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
+ [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
+ HYPERCALL(set_timer_op),
HYPERCALL(hvm_op)
};
@@ -2320,9 +2373,11 @@
[ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op_compat32,
[ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op_compat32,
[ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op_compat32,
+ [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op_compat32,
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
+ HYPERCALL(set_timer_op),
HYPERCALL(hvm_op)
};
diff -r 3bb163b74673 xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/arch/x86/hvm/irq.c Tue Mar 02 17:20:42 2010 +0000
@@ -23,8 +23,29 @@
#include <xen/types.h>
#include <xen/event.h>
#include <xen/sched.h>
+#include <xen/irq.h>
#include <asm/hvm/domain.h>
#include <asm/hvm/support.h>
+
+/* Must be called with hvm_domain->irq_lock held */
+static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
+{
+ int pirq = domain_emuirq_to_pirq(d, ioapic_gsi);
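+ /* If the emulated IRQ is mapped to a pirq, deliver it as an event
+ * channel notification instead of through the vioapic/vpic. */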
+ if ( pirq >= 0 )
+ {
+ send_guest_pirq(d, pirq);
+ return;
+ }
+ vioapic_irq_positive_edge(d, ioapic_gsi);
+ vpic_irq_positive_edge(d, pic_irq);
+}
+
+/* Must be called with hvm_domain->irq_lock held */
+static void deassert_irq(struct domain *d, unsigned isa_irq)
+{
+ if ( domain_emuirq_to_pirq(d, isa_irq) < 0 )
+ vpic_irq_negative_edge(d, isa_irq);
+}
static void __hvm_pci_intx_assert(
struct domain *d, unsigned int device, unsigned int intx)
@@ -45,10 +66,7 @@
isa_irq = hvm_irq->pci_link.route[link];
if ( (hvm_irq->pci_link_assert_count[link]++ == 0) && isa_irq &&
(hvm_irq->gsi_assert_count[isa_irq]++ == 0) )
- {
- vioapic_irq_positive_edge(d, isa_irq);
- vpic_irq_positive_edge(d, isa_irq);
- }
+ assert_irq(d, isa_irq, isa_irq);
}
void hvm_pci_intx_assert(
@@ -77,7 +95,7 @@
isa_irq = hvm_irq->pci_link.route[link];
if ( (--hvm_irq->pci_link_assert_count[link] == 0) && isa_irq &&
(--hvm_irq->gsi_assert_count[isa_irq] == 0) )
- vpic_irq_negative_edge(d, isa_irq);
+ deassert_irq(d, isa_irq);
}
void hvm_pci_intx_deassert(
@@ -100,10 +118,7 @@
if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
(hvm_irq->gsi_assert_count[gsi]++ == 0) )
- {
- vioapic_irq_positive_edge(d, gsi);
- vpic_irq_positive_edge(d, isa_irq);
- }
+ assert_irq(d, gsi, isa_irq);
spin_unlock(&d->arch.hvm_domain.irq_lock);
}
@@ -120,7 +135,7 @@
if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
(--hvm_irq->gsi_assert_count[gsi] == 0) )
- vpic_irq_negative_edge(d, isa_irq);
+ deassert_irq(d, isa_irq);
spin_unlock(&d->arch.hvm_domain.irq_lock);
}
@@ -185,16 +200,16 @@
void hvm_assert_evtchn_irq(struct vcpu *v)
{
- if ( v->vcpu_id != 0 )
- return;
-
if ( unlikely(in_irq() || !local_irq_is_enabled()) )
{
tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
return;
}
- hvm_set_callback_irq_level(v);
+ /* Event channel upcalls for PV-on-HVM vcpus are delivered by the
+ * vector callback, so a kick is all that is needed. */
+ if ( is_hvm_pv_evtchn_vcpu(v) )
+ vcpu_kick(v);
+ else
+ hvm_set_callback_irq_level(v);
}
void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
@@ -251,7 +266,7 @@
via_type = (uint8_t)(via >> 56) + 1;
if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
- (via_type > HVMIRQ_callback_pci_intx) )
+ (via_type > HVMIRQ_callback_vector) )
via_type = HVMIRQ_callback_none;
spin_lock(&d->arch.hvm_domain.irq_lock);
@@ -297,6 +312,9 @@
if ( hvm_irq->callback_via_asserted )
__hvm_pci_intx_assert(d, pdev, pintx);
break;
+ case HVMIRQ_callback_vector:
+ hvm_irq->callback_via.vector = (uint8_t)via;
+ break;
default:
break;
}
@@ -312,6 +330,10 @@
case HVMIRQ_callback_pci_intx:
printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
break;
+ case HVMIRQ_callback_vector:
+ printk("Set HVMIRQ_callback_vector to %u\n",
+ hvm_irq->callback_via.vector);
+ break;
default:
printk("None\n");
break;
@@ -323,6 +345,10 @@
struct hvm_domain *plat = &v->domain->arch.hvm_domain;
int vector;
+ if ( (plat->irq.callback_via_type == HVMIRQ_callback_vector) &&
+ vcpu_info(v, evtchn_upcall_pending) )
+ return hvm_intack_vector(plat->irq.callback_via.vector);
+
if ( unlikely(v->nmi_pending) )
return hvm_intack_nmi;
@@ -363,6 +389,8 @@
case hvm_intsrc_lapic:
if ( !vlapic_ack_pending_irq(v, intack.vector) )
intack = hvm_intack_none;
+ break;
+ case hvm_intsrc_vector:
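+ /* The callback vector is injected directly; no vlapic ack is needed. */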
break;
default:
intack = hvm_intack_none;
diff -r 3bb163b74673 xen/arch/x86/hvm/vmx/intr.c
--- a/xen/arch/x86/hvm/vmx/intr.c Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/arch/x86/hvm/vmx/intr.c Tue Mar 02 17:20:42 2010 +0000
@@ -164,7 +164,8 @@
{
HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
vmx_inject_extint(intack.vector);
- pt_intr_post(v, intack);
+ /* Vector callbacks do not come from the vlapic/vpt, so there is
+ * no periodic timer interrupt to post. */
+ if ( intack.source != hvm_intsrc_vector )
+ pt_intr_post(v, intack);
}
/* Is there another IRQ to queue up behind this one? */
diff -r 3bb163b74673 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/arch/x86/irq.c Tue Mar 02 17:20:42 2010 +0000
@@ -1733,3 +1733,71 @@
peoi[sp].ready = 1;
flush_ready_eoi();
}
+
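+/* Record a mapping between an emulated IRQ (e.g. an emulated IO-APIC
+ * GSI) and a guest pirq, so that the interrupt can later be delivered
+ * via send_guest_pirq. Caller must hold d->event_lock. */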
+int map_domain_emuirq_pirq(
+ struct domain *d, int pirq, int emuirq, int type, void *data)
+{
+ int old_emuirq, old_pirq, ret = 0;
+
+ ASSERT(spin_is_locked(&d->event_lock));
+
+ if ( !is_hvm_domain(d) )
+ return -EINVAL;
+
+ if ( pirq < 0 || pirq >= d->nr_pirqs || emuirq < 0 || emuirq >= nr_irqs )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: invalid pirq %d or irq %d\n",
+ d->domain_id, pirq, emuirq);
+ return -EINVAL;
+ }
+
+ old_emuirq = domain_pirq_to_emuirq(d, pirq);
+ old_pirq = domain_emuirq_to_pirq(d, emuirq);
+
+ if ( (old_emuirq > 0 && old_emuirq != emuirq) ||
+ (old_pirq > 0 && old_pirq != pirq) )
+ {
+ dprintk(XENLOG_G_WARNING, "dom%d: pirq %d or emuirq %d already mapped\n",
+ d->domain_id, pirq, emuirq);
+ return 0;
+ }
+
+ if ( type == MAP_PIRQ_TYPE_GSI )
+ {
+ d->arch.pirq_emuirq[pirq] = emuirq;
+ d->arch.emuirq_pirq[emuirq] = pirq;
+ }
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/* The pirq should have been unbound before this call. */
+int unmap_domain_pirq_emuirq(struct domain *d, int pirq)
+{
+ int emuirq, ret = 0;
+
+ if ( !is_hvm_domain(d) )
+ return -EINVAL;
+
+ if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
+ return -EINVAL;
+
+ ASSERT(spin_is_locked(&d->event_lock));
+
+ emuirq = domain_pirq_to_emuirq(d, pirq);
+ if ( emuirq < 0 )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: pirq %d not mapped\n",
+ d->domain_id, pirq);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ d->arch.pirq_emuirq[pirq] = -1;
+ d->arch.emuirq_pirq[emuirq] = -1;
+
+ done:
+ return ret;
+}
diff -r 3bb163b74673 xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/arch/x86/physdev.c Tue Mar 02 17:20:42 2010 +0000
@@ -44,6 +44,23 @@
if ( d == NULL )
return -ESRCH;
+
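+ /* HVM guests may map their own emulated interrupts to pirqs: only
+ * the emuirq<->pirq association is recorded, no physical IRQ is
+ * touched, so no privilege over physical IRQs is required. */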
+ if ( map->domid == DOMID_SELF && is_hvm_domain(d) )
+ {
+ spin_lock(&d->event_lock);
+ switch ( map->type )
+ {
+ case MAP_PIRQ_TYPE_GSI:
+ ret = map_domain_emuirq_pirq(d, map->pirq, map->index,
+ map->type, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ dprintk(XENLOG_G_WARNING, "map type %d not supported yet\n",
+ map->type);
+ break;
+ }
+ spin_unlock(&d->event_lock);
+ return ret;
+ }
if ( !IS_PRIV_FOR(current->domain, d) )
{
@@ -173,6 +190,14 @@
if ( d == NULL )
return -ESRCH;
+ if ( is_hvm_domain(d) )
+ {
+ spin_lock(&d->event_lock);
+ ret = unmap_domain_pirq_emuirq(d, unmap->pirq);
+ spin_unlock(&d->event_lock);
+ goto free_domain;
+ }
+
ret = -EPERM;
if ( !IS_PRIV_FOR(current->domain, d) )
goto free_domain;
@@ -206,7 +231,10 @@
break;
if ( v->domain->arch.pirq_eoi_map )
evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
- ret = pirq_guest_eoi(v->domain, eoi.irq);
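+ /* An HVM guest's pirqs are emulated interrupts here; there is no
+ * physical IRQ to EOI. */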
+ if ( !is_hvm_domain(v->domain) )
+ ret = pirq_guest_eoi(v->domain, eoi.irq);
+ else
+ ret = 0;
break;
}
@@ -261,6 +289,12 @@
if ( (irq < 0) || (irq >= v->domain->nr_pirqs) )
break;
irq_status_query.flags = 0;
+ if ( is_hvm_domain(v->domain) )
+ {
+ ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
+ break;
+ }
+
/*
* Even edge-triggered or message-based IRQs can need masking from
* time to time. If the guest is not dynamically checking for this
@@ -465,6 +499,11 @@
case PHYSDEVOP_setup_gsi: {
struct physdev_setup_gsi setup_gsi;
+ /* In the HVM case we are setting up an emulated GSI, therefore
+ * there is no need to do anything here */
+ if ( is_hvm_domain(v->domain) )
+ return 0;
+
ret = -EPERM;
if ( !IS_PRIV(v->domain) )
break;
diff -r 3bb163b74673 xen/common/event_channel.c
--- a/xen/common/event_channel.c Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/common/event_channel.c Tue Mar 02 17:20:42 2010 +0000
@@ -305,7 +305,7 @@
if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
return -EINVAL;
- if ( !irq_access_permitted(d, pirq) )
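+ /* For HVM guests the pirq refers to an emulated interrupt, so no
+ * permission over a physical IRQ is required. */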
+ if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
return -EPERM;
spin_lock(&d->event_lock);
@@ -319,12 +319,14 @@
chn = evtchn_from_port(d, port);
d->pirq_to_evtchn[pirq] = port;
- rc = pirq_guest_bind(d->vcpu[0], pirq,
- !!(bind->flags & BIND_PIRQ__WILL_SHARE));
- if ( rc != 0 )
- {
- d->pirq_to_evtchn[pirq] = 0;
- goto out;
+ if ( !is_hvm_domain(d) )
+ {
+ rc = pirq_guest_bind(d->vcpu[0], pirq,
+ !!(bind->flags & BIND_PIRQ__WILL_SHARE));
+ if ( rc != 0 )
+ {
+ d->pirq_to_evtchn[pirq] = 0;
+ goto out;
+ }
}
chn->state = ECS_PIRQ;
@@ -376,7 +378,8 @@
break;
case ECS_PIRQ:
- pirq_guest_unbind(d1, chn1->u.pirq);
+ if ( !is_hvm_domain(d1) )
+ pirq_guest_unbind(d1, chn1->u.pirq);
d1->pirq_to_evtchn[chn1->u.pirq] = 0;
break;
@@ -636,8 +639,17 @@
/*
* It should not be possible to race with __evtchn_close():
* The caller of this function must synchronise with pirq_guest_unbind().
+ *
+ * In the HVM case, port is 0 when the guest has disabled the
+ * emulated interrupt/evtchn.
*/
- ASSERT(port != 0);
+ if ( !port )
+ {
+ if ( is_hvm_domain(d) && (domain_pirq_to_emuirq(d, pirq) >= 0) )
+ return 0;
+ else
+ return -EINVAL;
+ }
chn = evtchn_from_port(d, port);
return evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
@@ -824,6 +836,7 @@
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
long rc;
+ struct domain *d = current->domain;
switch ( cmd )
{
@@ -852,6 +865,10 @@
if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_virq(&bind_virq);
+ /* An HVM guest binding VIRQ_TIMER is switching to PV timekeeping:
+ * refresh the wallclock area and reset vcpu0's TSC offset. */
+ if ( is_hvm_domain(d) && (bind_virq.virq == VIRQ_TIMER) )
+ {
+ update_domain_wallclock_time(d);
+ hvm_funcs.set_tsc_offset(d->vcpu[0], 0);
+ }
if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
diff -r 3bb163b74673 xen/include/asm-x86/domain.h
--- a/xen/include/asm-x86/domain.h Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/include/asm-x86/domain.h Tue Mar 02 17:20:42 2010 +0000
@@ -278,6 +278,9 @@
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
int *irq_pirq;
int *pirq_irq;
+ /* pirq to emulated irq and vice versa */
+ int *emuirq_pirq;
+ int *pirq_emuirq;
/* Shared page for notifying that explicit PIRQ EOI is required. */
unsigned long *pirq_eoi_map;
diff -r 3bb163b74673 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/include/asm-x86/hvm/hvm.h Tue Mar 02 17:20:42 2010 +0000
@@ -33,7 +33,8 @@
hvm_intsrc_pic,
hvm_intsrc_lapic,
hvm_intsrc_nmi,
- hvm_intsrc_mce
+ hvm_intsrc_mce,
+ hvm_intsrc_vector
};
struct hvm_intack {
uint8_t source; /* enum hvm_intsrc */
@@ -44,6 +45,7 @@
#define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
#define hvm_intack_nmi ( (struct hvm_intack) { hvm_intsrc_nmi, 2 } )
#define hvm_intack_mce ( (struct hvm_intack) { hvm_intsrc_mce, 18 } )
+#define hvm_intack_vector(vec) ( (struct hvm_intack) { hvm_intsrc_vector, vec } )
enum hvm_intblk {
hvm_intblk_none, /* not blocked (deliverable) */
hvm_intblk_shadow, /* MOV-SS or STI shadow */
diff -r 3bb163b74673 xen/include/asm-x86/hvm/irq.h
--- a/xen/include/asm-x86/hvm/irq.h Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/include/asm-x86/hvm/irq.h Tue Mar 02 17:20:42 2010 +0000
@@ -54,12 +54,14 @@
enum {
HVMIRQ_callback_none,
HVMIRQ_callback_gsi,
- HVMIRQ_callback_pci_intx
+ HVMIRQ_callback_pci_intx,
+ HVMIRQ_callback_vector
} callback_via_type;
};
union {
uint32_t gsi;
struct { uint8_t dev, intx; } pci;
+ uint32_t vector;
} callback_via;
/* Number of INTx wires asserting each PCI-ISA link. */
diff -r 3bb163b74673 xen/include/asm-x86/irq.h
--- a/xen/include/asm-x86/irq.h Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/include/asm-x86/irq.h Tue Mar 02 17:20:42 2010 +0000
@@ -112,6 +112,9 @@
int map_domain_pirq(struct domain *d, int pirq, int irq, int type,
void *data);
int unmap_domain_pirq(struct domain *d, int pirq);
+int map_domain_emuirq_pirq(struct domain *d, int pirq, int emuirq, int type,
+ void *data);
+int unmap_domain_pirq_emuirq(struct domain *d, int pirq);
int get_free_pirq(struct domain *d, int type, int index);
void free_domain_pirqs(struct domain *d);
@@ -147,5 +150,7 @@
#define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
#define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
+#define domain_pirq_to_emuirq(d, pirq) ((d)->arch.pirq_emuirq[pirq])
+#define domain_emuirq_to_pirq(d, emuirq) ((d)->arch.emuirq_pirq[emuirq])
#endif /* _ASM_HW_IRQ_H */
diff -r 3bb163b74673 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h Fri Feb 12 09:24:18 2010 +0000
+++ b/xen/include/xen/sched.h Tue Mar 02 17:20:42 2010 +0000
@@ -592,6 +592,9 @@
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
#define is_hvm_domain(d) ((d)->is_hvm)
+#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
+ (d)->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
+#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain((v)->domain))
#define is_hvm_vcpu(v) (is_hvm_domain(v->domain))
#define need_iommu(d) ((d)->need_iommu)