# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 66c27919578f374b3d239b3db63830f7c1afb5df
# Parent 36b76e5514567dbf1a1b93f4a0decc4fb4bf1433
[IA64] catch up `new Xen event channels'
Move the DM over to using the new Xen event channels for IPF.
Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@xxxxxxxxxxxxxx>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@xxxxxxxxxxxxxx>
---
xen/arch/ia64/vmx/mmio.c | 15 +---
xen/arch/ia64/vmx/vmx_init.c | 44 ++++++++++---
xen/arch/ia64/vmx/vmx_support.c | 132 ++++++++++++++++++----------------------
xen/arch/ia64/xen/domain.c | 3
xen/include/asm-ia64/vmx.h | 7 --
xen/include/asm-ia64/vmx_vpd.h | 3
6 files changed, 107 insertions(+), 97 deletions(-)
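For readers new to the ioreq handshake this patch adopts, the toy program below models the same flow in self-contained C: the emulation side posts a request, marks it STATE_IOREQ_READY, notifies the other end, and blocks until the device model flips it to STATE_IORESP_READY, which is roughly what vmx_send_assist_req() and vmx_io_assist() do in the diff below. It is only a sketch: toy_ioreq, toy_dm and toy_send_assist_req are made-up names, and a pthread condition variable stands in for the Xen event channel notify and the yield loop used in the real code.

/* Toy model of the ioreq handshake; none of these names are Xen APIs. */
#include <pthread.h>
#include <stdio.h>

enum io_state { STATE_INVALID, STATE_IOREQ_READY,
                STATE_IOREQ_INPROCESS, STATE_IORESP_READY };

struct toy_ioreq {
    enum io_state   state;
    unsigned long   data;   /* result written back by the "device model" */
    pthread_mutex_t lock;
    pthread_cond_t  cond;
};

/* Stands in for the device model on the other end of the channel. */
static void *toy_dm(void *arg)
{
    struct toy_ioreq *p = arg;

    pthread_mutex_lock(&p->lock);
    while (p->state != STATE_IOREQ_READY)
        pthread_cond_wait(&p->cond, &p->lock);
    p->state = STATE_IOREQ_INPROCESS;
    p->data  = 0x42;                     /* "emulate" the MMIO read */
    p->state = STATE_IORESP_READY;
    pthread_cond_broadcast(&p->cond);    /* notify completion */
    pthread_mutex_unlock(&p->lock);
    return NULL;
}

/* Rough analogue of vmx_send_assist_req(): post the request, notify,
 * block until the response is ready, then reset the slot the way
 * vmx_io_assist() does. */
static unsigned long toy_send_assist_req(struct toy_ioreq *p)
{
    unsigned long val;

    pthread_mutex_lock(&p->lock);
    p->state = STATE_IOREQ_READY;
    pthread_cond_broadcast(&p->cond);    /* event-channel notify stand-in */
    while (p->state != STATE_IORESP_READY)
        pthread_cond_wait(&p->cond, &p->lock);
    val = p->data;
    p->state = STATE_INVALID;
    pthread_mutex_unlock(&p->lock);
    return val;
}

int main(void)
{
    struct toy_ioreq req = { .state = STATE_INVALID };
    pthread_t dm;

    pthread_mutex_init(&req.lock, NULL);
    pthread_cond_init(&req.cond, NULL);
    pthread_create(&dm, NULL, toy_dm, &req);
    printf("emulated MMIO read returned 0x%lx\n", toy_send_assist_req(&req));
    pthread_join(dm, NULL);
    return 0;
}

Build with "cc -pthread" if you want to run it; in the real patch the notify is notify_via_xen_event_channel() and the wait is the _VCPUF_blocked_in_xen yield loop rather than a condition variable.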
diff -r 36b76e551456 -r 66c27919578f xen/arch/ia64/vmx/mmio.c
--- a/xen/arch/ia64/vmx/mmio.c Wed Aug 23 12:56:10 2006 -0600
+++ b/xen/arch/ia64/vmx/mmio.c Wed Aug 23 13:13:51 2006 -0600
@@ -155,10 +155,9 @@ static void low_mmio_access(VCPU *vcpu,
p->type = 1;
p->df = 0;
- set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
- p->state = STATE_IOREQ_READY;
- evtchn_send(iopacket_port(v));
- vmx_wait_io();
+ p->io_count++;
+
+ vmx_send_assist_req(v);
if(dir==IOREQ_READ){ //read
*val=p->u.data;
}
@@ -187,11 +186,9 @@ static void legacy_io_access(VCPU *vcpu,
p->type = 0;
p->df = 0;
- set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
- p->state = STATE_IOREQ_READY;
- evtchn_send(iopacket_port(v));
-
- vmx_wait_io();
+ p->io_count++;
+
+ vmx_send_assist_req(v);
if(dir==IOREQ_READ){ //read
*val=p->u.data;
}
diff -r 36b76e551456 -r 66c27919578f xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c Wed Aug 23 12:56:10 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_init.c Wed Aug 23 13:13:51 2006 -0600
@@ -206,7 +206,7 @@ vmx_create_vp(struct vcpu *v)
u64 ret;
vpd_t *vpd = (vpd_t *)v->arch.privregs;
u64 ivt_base;
- extern char vmx_ia64_ivt;
+ extern char vmx_ia64_ivt;
/* ia64_ivt is function pointer, so need this translation */
ivt_base = (u64) &vmx_ia64_ivt;
printk("ivt_base: 0x%lx\n", ivt_base);
@@ -265,6 +265,29 @@ vmx_load_state(struct vcpu *v)
* anchored in vcpu */
}
+static void vmx_create_event_channels(struct vcpu *v)
+{
+ vcpu_iodata_t *p;
+ struct vcpu *o;
+
+ if (v->vcpu_id == 0) {
+ /* Ugly: create event channels for every vcpu when vcpu 0
+ starts, so that they're available for ioemu to bind to. */
+ for_each_vcpu(v->domain, o) {
+ p = get_vio(v->domain, o->vcpu_id);
+ o->arch.arch_vmx.xen_port = p->vp_eport =
+ alloc_unbound_xen_event_channel(o, 0);
+ DPRINTK("Allocated port %d for hvm.\n",
+ o->arch.arch_vmx.xen_port);
+ }
+ }
+}
+
+static void vmx_release_assist_channel(struct vcpu *v)
+{
+ free_xen_event_channel(v, v->arch.arch_vmx.xen_port);
+}
+
/*
* Initialize VMX environment for guest. Only the 1st vp/vcpu
* is registered here.
@@ -286,6 +309,8 @@ vmx_final_setup_guest(struct vcpu *v)
#ifndef HASH_VHPT
init_domain_tlb(v);
#endif
+ vmx_create_event_channels(v);
+
/* v->arch.schedule_tail = arch_vmx_do_launch; */
vmx_create_vp(v);
@@ -301,6 +326,15 @@ vmx_final_setup_guest(struct vcpu *v)
set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
/* Set up guest 's indicator for VTi domain*/
set_bit(ARCH_VMX_DOMAIN, &v->arch.arch_vmx.flags);
+}
+
+void
+vmx_relinquish_guest_resources(struct domain *d)
+{
+ struct vcpu *v;
+
+ for_each_vcpu(d, v)
+ vmx_release_assist_channel(v);
}
void
@@ -420,13 +454,5 @@ void vmx_setup_platform(struct domain *d
void vmx_do_launch(struct vcpu *v)
{
- if (evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0) {
- printk("VMX domain bind port %d to vcpu %d failed!\n",
- iopacket_port(v), v->vcpu_id);
- domain_crash_synchronous();
- }
-
- clear_bit(iopacket_port(v), &v->domain->shared_info->evtchn_mask[0]);
-
vmx_load_all_rr(v);
}
diff -r 36b76e551456 -r 66c27919578f xen/arch/ia64/vmx/vmx_support.c
--- a/xen/arch/ia64/vmx/vmx_support.c Wed Aug 23 12:56:10 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_support.c Wed Aug 23 13:13:51 2006 -0600
@@ -1,4 +1,3 @@
-
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
* vmx_support.c: vmx specific support interface.
@@ -22,45 +21,11 @@
#include <xen/config.h>
#include <xen/sched.h>
#include <xen/hypercall.h>
+#include <xen/event.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>
-
-/*
- * I/O emulation should be atomic from domain point of view. However,
- * when emulation code is waiting for I/O completion by blocking,
- * other events like DM interrupt, VBD, etc. may come and unblock
- * current execution flow. So we have to prepare for re-block if unblocked
- * by non I/O completion event. After io emulation is done, re-enable
- * pending indication if other ports are pending
- */
-void vmx_wait_io(void)
-{
- struct vcpu *v = current;
- struct domain *d = v->domain;
- int port = iopacket_port(v);
-
- for (;;) {
- if (test_and_clear_bit(0, &v->vcpu_info->evtchn_upcall_pending) &&
- test_and_clear_bit(port / BITS_PER_LONG,
- &v->vcpu_info->evtchn_pending_sel) &&
- test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]))
- vmx_io_assist(v);
-
- if (!test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
- break;
-
- do_sched_op_compat(SCHEDOP_block, 0);
- }
-
- /* re-enable indication if other pending events */
- if (d->shared_info->evtchn_pending[port / BITS_PER_LONG])
- set_bit(port / BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
-
- if (v->vcpu_info->evtchn_pending_sel)
- set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
-}
/*
* Only place to call vmx_io_assist is mmio/legacy_io emulation.
@@ -83,17 +48,15 @@ void vmx_io_assist(struct vcpu *v)
p = &vio->vp_ioreq;
- if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
- if (p->state != STATE_IORESP_READY) {
- /* Can't block here, for the same reason as other places to
- * use vmx_wait_io. Simple return is safe since vmx_wait_io will
- * try to block again
- */
- return;
- } else
- p->state = STATE_INVALID;
-
- clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
+ if (p->state == STATE_IORESP_READY) {
+ p->state = STATE_INVALID;
+ }
+ else {
+ /* Can't block here, for the same reason as other places to
+ * use vmx_wait_io. Simple return is safe since vmx_wait_io will
+ * try to block again
+ */
+ return;
}
}
@@ -108,31 +71,6 @@ void vmx_io_assist(struct vcpu *v)
*/
void vmx_intr_assist(struct vcpu *v)
{
- vcpu_iodata_t *vio;
- struct domain *d = v->domain;
- extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
- unsigned long *pend_irr);
- int port = iopacket_port(v);
-
- if (test_bit(port, &d->shared_info->evtchn_pending[0]) ||
- test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
- vmx_wait_io();
-
- /* I/O emulation is atomic, so it's impossible to see execution flow
- * out of vmx_wait_io, when guest is still waiting for response.
- */
- if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
- panic_domain(vcpu_regs(v),"!!!Bad resume to guest before I/O emulation
is done.\n");
-
- /* Even without event pending, we still need to sync pending bits
- * between DM and vlsapic. The reason is that interrupt delivery
- * shares same event channel as I/O emulation, with corresponding
- * indicator possibly cleared when vmx_wait_io().
- */
- vio = get_vio(v->domain, v->vcpu_id);
- if (!vio)
- panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n",
(unsigned long)vio);
-
#ifdef V_IOSAPIC_READY
/* Confirm virtual interrupt line signals, and set pending bits in vpd */
if(v->vcpu_id==0)
@@ -140,3 +78,53 @@ void vmx_intr_assist(struct vcpu *v)
#endif
return;
}
+
+void vmx_send_assist_req(struct vcpu *v)
+{
+ ioreq_t *p;
+
+ p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+ if (unlikely(p->state != STATE_INVALID)) {
+ /* This indicates a bug in the device model. Crash the
+ domain. */
+ printk("Device model set bad IO state %d.\n", p->state);
+ domain_crash(v->domain);
+ return;
+ }
+ wmb();
+ p->state = STATE_IOREQ_READY;
+ notify_via_xen_event_channel(v->arch.arch_vmx.xen_port);
+
+ /*
+ * Wait for MMIO completion, much like the wait_on_xen_event_channel()
+ * macro, but we can't call do_softirq() at this point.
+ */
+ for (;;) {
+ if (p->state != STATE_IOREQ_READY &&
+ p->state != STATE_IOREQ_INPROCESS)
+ break;
+
+ set_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
+ mb(); /* set blocked status /then/ re-evaluate condition */
+ if (p->state != STATE_IOREQ_READY &&
+ p->state != STATE_IOREQ_INPROCESS)
+ {
+ clear_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
+ break;
+ }
+
+ /* I want to call __enter_scheduler() only */
+ do_sched_op_compat(SCHEDOP_yield, 0);
+ mb();
+ }
+
+ /* the code below this line is the completion phase... */
+ vmx_io_assist(v);
+}
+
+/* Wake up a vcpu which is waiting for interrupts to come in */
+void vmx_prod_vcpu(struct vcpu *v)
+{
+ vcpu_unblock(v);
+}
diff -r 36b76e551456 -r 66c27919578f xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c Wed Aug 23 12:56:10 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c Wed Aug 23 13:13:51 2006 -0600
@@ -543,6 +543,9 @@ void domain_relinquish_resources(struct
// release pages traversing d->arch.mm.
relinquish_mm(d);
+ if (d->vcpu[0] && VMX_DOMAIN(d->vcpu[0]))
+ vmx_relinquish_guest_resources(d);
+
relinquish_memory(d, &d->xenpage_list);
relinquish_memory(d, &d->page_list);
diff -r 36b76e551456 -r 66c27919578f xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h Wed Aug 23 12:56:10 2006 -0600
+++ b/xen/include/asm-ia64/vmx.h Wed Aug 23 13:13:51 2006 -0600
@@ -35,7 +35,6 @@ extern void vmx_save_state(struct vcpu *
extern void vmx_save_state(struct vcpu *v);
extern void vmx_load_state(struct vcpu *v);
extern void vmx_setup_platform(struct domain *d);
-extern void vmx_wait_io(void);
extern void vmx_io_assist(struct vcpu *v);
extern int ia64_hypercall (struct pt_regs *regs);
extern void vmx_save_state(struct vcpu *v);
@@ -53,17 +52,13 @@ extern void vmx_intr_assist(struct vcpu
extern void vmx_intr_assist(struct vcpu *v);
extern void set_illegal_op_isr (struct vcpu *vcpu);
extern void illegal_op (struct vcpu *vcpu);
+extern void vmx_relinquish_guest_resources(struct domain *d);
extern void vmx_relinquish_vcpu_resources(struct vcpu *v);
extern void vmx_die_if_kernel(char *str, struct pt_regs *regs, long err);
static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
{
return &((shared_iopage_t
*)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
-}
-
-static inline int iopacket_port(struct vcpu *v)
-{
- return get_vio(v->domain, v->vcpu_id)->vp_eport;
}
static inline shared_iopage_t *get_sp(struct domain *d)
diff -r 36b76e551456 -r 66c27919578f xen/include/asm-ia64/vmx_vpd.h
--- a/xen/include/asm-ia64/vmx_vpd.h Wed Aug 23 12:56:10 2006 -0600
+++ b/xen/include/asm-ia64/vmx_vpd.h Wed Aug 23 13:13:51 2006 -0600
@@ -96,7 +96,8 @@ struct arch_vmx_struct {
// unsigned long rfi_ipsr;
// unsigned long rfi_ifs;
// unsigned long in_service[4]; // vLsapic inservice IRQ bits
- unsigned long flags;
+ unsigned long flags;
+ unsigned long xen_port;
#ifdef VTI_DEBUG
unsigned long ivt_current;
struct ivt_debug ivt_debug[IVT_DEBUG_MAX];