[Xen-changelog] Per-vcpu IO evtchn patch for HVM domain.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Per-vcpu IO evtchn patch for HVM domain.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 23 Feb 2006 11:26:06 +0000
Delivery-date: Thu, 23 Feb 2006 11:26:18 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID b5bb9920bf48472382bd831f5aa5d1c24fdecc6e
# Parent  175ad739d8bc42a32fc293bc38057823038a0cc4
Per-vcpu IO evtchn patch for HVM domain.
We are starting to send patches to support SMP VMX guests.
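
Each vcpu now owns a pair of event-channel ports in the shared IO page:
vp_eport is the guest vcpu's end of an interdomain channel, and dm_eport
is the device model's local end, filled in when qemu-dm binds to
vp_eport. On a notification, qemu-dm no longer assumes vcpu 0: it reads
the fired port from the evtchn device and searches the shared page for
the vcpu whose dm_eport matches. Below is a minimal standalone sketch of
that lookup (illustrative only; NR_VCPUS, the port numbers and the
trimmed structs are invented for the example, not taken from the tree):

#include <stdio.h>

#define NR_VCPUS 4                    /* invented vcpu count */

typedef struct {
    unsigned long vp_eport;           /* guest vcpu's end of the channel */
    unsigned long dm_eport;           /* device model's local end */
} vcpu_iodata_t;

typedef struct {
    vcpu_iodata_t vcpu_iodata[NR_VCPUS];
} shared_iopage_t;

/* Map a fired local port back to the vcpu it serves (cf. cpu_get_ioreq). */
static int port_to_vcpu(shared_iopage_t *sp, unsigned long port)
{
    int i;

    for (i = 0; i < NR_VCPUS; i++)
        if (sp->vcpu_iodata[i].dm_eport == port)
            return i;
    return -1;                        /* unknown port; fatal in qemu-dm */
}

int main(void)
{
    shared_iopage_t sp;
    int i;

    for (i = 0; i < NR_VCPUS; i++) {
        sp.vcpu_iodata[i].vp_eport = 10 + i;    /* invented guest ports */
        sp.vcpu_iodata[i].dm_eport = 100 + i;   /* invented local ports */
    }

    /* A read from the evtchn device returned port 102: that is vcpu 2. */
    printf("port 102 serves vcpu %d\n", port_to_vcpu(&sp, 102));
    return 0;
}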

Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>

diff -r 175ad739d8bc -r b5bb9920bf48 tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Wed Feb 22 20:52:30 2006
+++ b/tools/ioemu/target-i386-dm/helper2.c      Thu Feb 23 10:22:25 2006
@@ -125,9 +125,8 @@
 //the evtchn fd for polling
 int evtchn_fd = -1;
 
-//the evtchn port for polling the notification,
-//should be inputed as bochs's parameter
-evtchn_port_t ioreq_remote_port, ioreq_local_port;
+//which vcpu we are serving
+int send_vcpu = 0;
 
 //some functions to handle the io req packet
 void sp_info()
@@ -135,52 +134,62 @@
     ioreq_t *req;
     int i;
 
-    term_printf("event port: %d\n", shared_page->sp_global.eport);
     for ( i = 0; i < vcpus; i++ ) {
         req = &(shared_page->vcpu_iodata[i].vp_ioreq);
-        term_printf("vcpu %d:\n", i);
+        term_printf("vcpu %d: event port %d\n",
+                    i, shared_page->vcpu_iodata[i].vp_eport);
         term_printf("  req state: %x, pvalid: %x, addr: %llx, "
                     "data: %llx, count: %llx, size: %llx\n",
                     req->state, req->pdata_valid, req->addr,
                     req->u.data, req->count, req->size);
+        term_printf("  IO totally occurred on this vcpu: %llx\n",
+                    req->io_count);
     }
 }
 
 //get the ioreq packets from share mem
-ioreq_t* __cpu_get_ioreq(void)
+static ioreq_t* __cpu_get_ioreq(int vcpu)
 {
     ioreq_t *req;
 
-    req = &(shared_page->vcpu_iodata[0].vp_ioreq);
-    if (req->state == STATE_IOREQ_READY) {
-        req->state = STATE_IOREQ_INPROCESS;
-    } else {
-        fprintf(logfile, "False I/O request ... in-service already: "
-                         "%x, pvalid: %x, port: %llx, "
-                         "data: %llx, count: %llx, size: %llx\n",
-                         req->state, req->pdata_valid, req->addr,
-                         req->u.data, req->count, req->size);
-        req = NULL;
-    }
-
-    return req;
+    req = &(shared_page->vcpu_iodata[vcpu].vp_ioreq);
+
+    if ( req->state == STATE_IOREQ_READY )
+        return req;
+
+    fprintf(logfile, "False I/O request ... in-service already: "
+                     "%x, pvalid: %x, port: %llx, "
+                     "data: %llx, count: %llx, size: %llx\n",
+                     req->state, req->pdata_valid, req->addr,
+                     req->u.data, req->count, req->size);
+    return NULL;
 }
 
 //use poll to get the port notification
 //ioreq_vec--out,the
 //retval--the number of ioreq packet
-ioreq_t* cpu_get_ioreq(void)
-{
-    int rc;
+static ioreq_t* cpu_get_ioreq(void)
+{
+    int i, rc;
     evtchn_port_t port;
 
     rc = read(evtchn_fd, &port, sizeof(port));
-    if ((rc == sizeof(port)) && (port == ioreq_local_port)) {
+    if ( rc == sizeof(port) ) {
+        for ( i = 0; i < vcpus; i++ )
+            if ( shared_page->vcpu_iodata[i].dm_eport == port )
+                break;
+
+        if ( i == vcpus ) {
+            fprintf(logfile, "Fatal error while trying to get io event!\n");
+            exit(1);
+        }
+
         // unmask the wanted port again
-        write(evtchn_fd, &ioreq_local_port, sizeof(port));
+        write(evtchn_fd, &port, sizeof(port));
 
         //get the io packet from shared memory
-        return __cpu_get_ioreq();
+        send_vcpu = i;
+        return __cpu_get_ioreq(i);
     }
 
     //read error or read nothing
@@ -361,6 +370,8 @@
     ioreq_t *req = cpu_get_ioreq();
 
     if (req) {
+        req->state = STATE_IOREQ_INPROCESS;
+
         if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
             if (req->size != 4)
                 req->u.data &= (1UL << (8 * req->size))-1;
@@ -465,7 +476,7 @@
             struct ioctl_evtchn_notify notify;
 
             env->send_event = 0;
-            notify.port = ioreq_local_port;
+            notify.port = shared_page->vcpu_iodata[send_vcpu].dm_eport;
             (void)ioctl(evtchn_fd, IOCTL_EVTCHN_NOTIFY, &notify);
         }
     }
@@ -488,7 +499,7 @@
 {
     CPUX86State *env;
     struct ioctl_evtchn_bind_interdomain bind;
-    int rc;
+    int i, rc;
 
     cpu_exec_init();
     qemu_register_reset(qemu_hvm_reset, NULL);
@@ -509,14 +520,17 @@
         return NULL;
     }
 
+    /* FIXME: what if we overflow the page here? */
     bind.remote_domain = domid;
-    bind.remote_port   = ioreq_remote_port;
-    rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
-    if (rc == -1) {
-        fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
-        return NULL;
-    }
-    ioreq_local_port = rc;
+    for ( i = 0; i < vcpus; i++ ) {
+        bind.remote_port = shared_page->vcpu_iodata[i].vp_eport;
+        rc = ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
+        if ( rc == -1 ) {
+            fprintf(logfile, "bind interdomain ioctl error %d\n", errno);
+            return NULL;
+        }
+        shared_page->vcpu_iodata[i].dm_eport = rc;
+    }
 
     return env;
 }
diff -r 175ad739d8bc -r b5bb9920bf48 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Wed Feb 22 20:52:30 2006
+++ b/tools/ioemu/vl.c  Thu Feb 23 10:22:25 2006
@@ -2337,7 +2337,6 @@
 
     QEMU_OPTION_S,
     QEMU_OPTION_s,
-    QEMU_OPTION_p,
     QEMU_OPTION_d,
     QEMU_OPTION_l,
     QEMU_OPTION_hdachs,
@@ -2414,7 +2413,6 @@
 
     { "S", 0, QEMU_OPTION_S },
     { "s", 0, QEMU_OPTION_s },
-    { "p", HAS_ARG, QEMU_OPTION_p },
     { "d", HAS_ARG, QEMU_OPTION_d },
     { "l", HAS_ARG, QEMU_OPTION_l },
     { "hdachs", HAS_ARG, QEMU_OPTION_hdachs },
@@ -2936,13 +2934,6 @@
                 {
                     domid = atoi(optarg);
                     fprintf(logfile, "domid: %d\n", domid);
-                }
-                break;
-            case QEMU_OPTION_p:
-                {
-                    extern evtchn_port_t ioreq_remote_port;
-                    ioreq_remote_port = atoi(optarg);
-                    fprintf(logfile, "eport: %d\n", ioreq_remote_port);
                 }
                 break;
             case QEMU_OPTION_l:
diff -r 175ad739d8bc -r b5bb9920bf48 tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c        Wed Feb 22 20:52:30 2006
+++ b/tools/libxc/xc_hvm_build.c        Thu Feb 23 10:22:25 2006
@@ -175,7 +175,6 @@
                        unsigned long nr_pages,
                        vcpu_guest_context_t *ctxt,
                        unsigned long shared_info_frame,
-                       unsigned int control_evtchn,
                        unsigned int vcpus,
                        unsigned int pae,
                        unsigned int acpi,
@@ -284,7 +283,19 @@
          shared_page_frame)) == 0 )
         goto error_out;
     memset(sp, 0, PAGE_SIZE);
-    sp->sp_global.eport = control_evtchn;
+
+    /* FIXME: what if we overflow the page here? */
+    for ( i = 0; i < vcpus; i++ ) {
+        int vp_eport;   /* must be signed: the allocation returns a negative value on error */
+
+        vp_eport = xc_evtchn_alloc_unbound(xc_handle, dom, 0);
+        if ( vp_eport < 0 ) {
+            fprintf(stderr, "Couldn't get unbound port from VMX guest.\n");
+            goto error_out;
+        }
+        sp->vcpu_iodata[i].vp_eport = vp_eport;
+    }
+
     munmap(sp, PAGE_SIZE);
 
     *store_mfn = page_array[(v_end >> PAGE_SHIFT) - 2];
@@ -331,7 +342,6 @@
                  uint32_t domid,
                  int memsize,
                  const char *image_name,
-                 unsigned int control_evtchn,
                  unsigned int vcpus,
                  unsigned int pae,
                  unsigned int acpi,
@@ -388,7 +398,7 @@
 
     ctxt->flags = VGCF_HVM_GUEST;
     if ( setup_guest(xc_handle, domid, memsize, image, image_size, nr_pages,
-                     ctxt, op.u.getdomaininfo.shared_info_frame, control_evtchn,
+                     ctxt, op.u.getdomaininfo.shared_info_frame,
                      vcpus, pae, acpi, apic, store_evtchn, store_mfn) < 0)
     {
         ERROR("Error constructing guest OS");
diff -r 175ad739d8bc -r b5bb9920bf48 tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h    Wed Feb 22 20:52:30 2006
+++ b/tools/libxc/xenguest.h    Thu Feb 23 10:22:25 2006
@@ -57,7 +57,6 @@
                  uint32_t domid,
                  int memsize,
                  const char *image_name,
-                 unsigned int control_evtchn,
                  unsigned int vcpus,
                  unsigned int pae,
                  unsigned int acpi,
diff -r 175ad739d8bc -r b5bb9920bf48 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Wed Feb 22 20:52:30 2006
+++ b/tools/python/xen/lowlevel/xc/xc.c Thu Feb 23 10:22:25 2006
@@ -363,7 +363,7 @@
 {
     uint32_t dom;
     char *image;
-    int control_evtchn, store_evtchn;
+    int store_evtchn;
     int memsize;
     int vcpus = 1;
     int pae  = 0;
@@ -371,15 +371,15 @@
     int apic = 0;
     unsigned long store_mfn = 0;
 
-    static char *kwd_list[] = { "dom", "control_evtchn", "store_evtchn",
+    static char *kwd_list[] = { "dom", "store_evtchn",
                                "memsize", "image", "vcpus", "pae", "acpi", 
"apic",
                                NULL };
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiisiiii", kwd_list,
-                                      &dom, &control_evtchn, &store_evtchn,
-                                     &memsize, &image, &vcpus, &pae, &acpi, &apic) )
-        return NULL;
-
-    if ( xc_hvm_build(self->xc_handle, dom, memsize, image, control_evtchn,
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiisiiii", kwd_list,
+                                      &dom, &store_evtchn, &memsize,
+                                      &image, &vcpus, &pae, &acpi, &apic) )
+        return NULL;
+
+    if ( xc_hvm_build(self->xc_handle, dom, memsize, image,
                      vcpus, pae, acpi, apic, store_evtchn, &store_mfn) != 0 )
         return PyErr_SetFromErrno(xc_error);
 
diff -r 175ad739d8bc -r b5bb9920bf48 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Wed Feb 22 20:52:30 2006
+++ b/tools/python/xen/xend/image.py    Thu Feb 23 10:22:25 2006
@@ -205,7 +205,6 @@
                         ("image/device-model", self.device_model),
                         ("image/display", self.display))
 
-        self.device_channel = None
         self.pid = 0
 
         self.dmargs += self.configVNC(imageConfig)
@@ -216,16 +215,10 @@
         self.apic = int(sxp.child_value(imageConfig, 'apic', 0))
 
     def buildDomain(self):
-        # Create an event channel
-        self.device_channel = xc.evtchn_alloc_unbound(dom=self.vm.getDomid(),
-                                                      remote_dom=0)
-        log.info("HVM device model port: %d", self.device_channel)
-
         store_evtchn = self.vm.getStorePort()
 
         log.debug("dom            = %d", self.vm.getDomid())
         log.debug("image          = %s", self.kernel)
-        log.debug("control_evtchn = %d", self.device_channel)
         log.debug("store_evtchn   = %d", store_evtchn)
         log.debug("memsize        = %d", self.vm.getMemoryTarget() / 1024)
         log.debug("vcpus          = %d", self.vm.getVCpuCount())
@@ -237,7 +230,6 @@
 
         return xc.hvm_build(dom            = self.vm.getDomid(),
                             image          = self.kernel,
-                            control_evtchn = self.device_channel,
                             store_evtchn   = store_evtchn,
                             memsize        = self.vm.getMemoryTarget() / 1024,
                             vcpus          = self.vm.getVCpuCount(),
@@ -345,7 +337,6 @@
         if len(vnc):
             args = args + vnc
         args = args + ([ "-d",  "%d" % self.vm.getDomid(),
-                  "-p", "%d" % self.device_channel,
                   "-m", "%s" % (self.vm.getMemoryTarget() / 1024)])
         args = args + self.dmargs
         env = dict(os.environ)
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/hvm.c    Thu Feb 23 10:22:25 2006
@@ -124,11 +124,6 @@
         domain_crash_synchronous();
     }
     d->arch.hvm_domain.shared_page_va = (unsigned long)p;
-
-    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d));
-
-    clear_bit(iopacket_port(d),
-              &d->shared_info->evtchn_mask[0]);
 }
 
 static int validate_hvm_info(struct hvm_info_table *t)
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/intercept.c      Thu Feb 23 10:22:25 2006
@@ -332,8 +332,8 @@
 void hlt_timer_fn(void *data)
 {
     struct vcpu *v = data;
-    
-    evtchn_set_pending(v, iopacket_port(v->domain));
+
+    evtchn_set_pending(v, iopacket_port(v));
 }
 
 static __inline__ void missed_ticks(struct hvm_virpit*vpit)
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/io.c     Thu Feb 23 10:22:25 2006
@@ -697,8 +697,8 @@
 void hvm_wait_io(void)
 {
     struct vcpu *v = current;
-    struct domain *d = v->domain;    
-    int port = iopacket_port(d);
+    struct domain *d = v->domain;
+    int port = iopacket_port(v);
 
     for ( ; ; )
     {
@@ -729,8 +729,8 @@
 void hvm_safe_block(void)
 {
     struct vcpu *v = current;
-    struct domain *d = v->domain;    
-    int port = iopacket_port(d);
+    struct domain *d = v->domain;
+    int port = iopacket_port(v);
 
     for ( ; ; )
     {
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/platform.c       Thu Feb 23 10:22:25 2006
@@ -41,8 +41,6 @@
 
 #define DECODE_success  1
 #define DECODE_failure  0
-
-extern long evtchn_send(int lport);
 
 #if defined (__x86_64__)
 static inline long __get_reg_value(unsigned long reg, int size)
@@ -648,6 +646,8 @@
     p->count = count;
     p->df = regs->eflags & EF_DF ? 1 : 0;
 
+    p->io_count++;
+
     if (pvalid) {
         if (hvm_paging_enabled(current))
             p->u.pdata = (void *) gva_to_gpa(value);
@@ -664,18 +664,18 @@
 
     p->state = STATE_IOREQ_READY;
 
-    evtchn_send(iopacket_port(v->domain));
+    evtchn_send(iopacket_port(v));
     hvm_wait_io();
 }
 
-void send_mmio_req(unsigned char type, unsigned long gpa,
-                   unsigned long count, int size, long value, int dir, int pvalid)
+void send_mmio_req(
+    unsigned char type, unsigned long gpa,
+    unsigned long count, int size, long value, int dir, int pvalid)
 {
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
     ioreq_t *p;
     struct cpu_user_regs *regs;
-    extern long evtchn_send(int lport);
 
     regs = current->arch.hvm_vcpu.mmio_op.inst_decoder_regs;
 
@@ -701,6 +701,8 @@
     p->addr = gpa;
     p->count = count;
     p->df = regs->eflags & EF_DF ? 1 : 0;
+
+    p->io_count++;
 
     if (pvalid) {
         if (hvm_paging_enabled(v))
@@ -718,7 +720,7 @@
 
     p->state = STATE_IOREQ_READY;
 
-    evtchn_send(iopacket_port(v->domain));
+    evtchn_send(iopacket_port(v));
     hvm_wait_io();
 }
 
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Feb 23 10:22:25 2006
@@ -64,7 +64,6 @@
 /* 
  * External functions, etc. We should move these to some suitable header file(s) */
 
-extern long evtchn_send(int lport);
 extern void do_nmi(struct cpu_user_regs *, unsigned long);
 extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
                                 int inst_len);
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Thu Feb 23 10:22:25 2006
@@ -421,6 +421,18 @@
     if (v->vcpu_id == 0)
         hvm_setup_platform(v->domain);
 
+    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
+    {
+        printk("HVM domain bind port %d to vcpu %d failed!\n",
+               iopacket_port(v), v->vcpu_id);
+        domain_crash_synchronous();
+    }
+
+    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
+
+    clear_bit(iopacket_port(v),
+              &v->domain->shared_info->evtchn_mask[0]);
+
     if (hvm_apic_support(v->domain))
         vlapic_init(v);
     init_timer(&v->arch.hvm_svm.hlt_timer,
@@ -490,7 +502,7 @@
 
     svm_stts(v);
 
-    if ( test_bit(iopacket_port(d), &d->shared_info->evtchn_pending[0]) ||
+    if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
          test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
         hvm_wait_io();
 
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/vlapic.c Thu Feb 23 10:22:25 2006
@@ -210,7 +210,7 @@
                 set_bit(vector, &vlapic->tmr[0]);
             }
         }
-        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->domain));
+        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->vcpu));
         result = 1;
         break;
 
@@ -834,7 +834,7 @@
         }
         else
             vlapic->intr_pending_count[vlapic_lvt_vector(vlapic, VLAPIC_LVT_TIMER)]++;
-        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->domain));
+        evtchn_set_pending(vlapic->vcpu, iopacket_port(vlapic->vcpu));
     }
 
     vlapic->timer_current_update = NOW();
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/vmx/io.c
--- a/xen/arch/x86/hvm/vmx/io.c Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/vmx/io.c Thu Feb 23 10:22:25 2006
@@ -178,7 +178,7 @@
 
     vmx_stts();
 
-    if ( test_bit(iopacket_port(d), &d->shared_info->evtchn_pending[0]) ||
+    if ( test_bit(iopacket_port(v), &d->shared_info->evtchn_pending[0]) ||
          test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
         hvm_wait_io();
 
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Thu Feb 23 10:22:25 2006
@@ -200,6 +200,18 @@
     if (v->vcpu_id == 0)
         hvm_setup_platform(v->domain);
 
+    if ( evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0 )
+    {
+        printk("VMX domain bind port %d to vcpu %d failed!\n",
+               iopacket_port(v), v->vcpu_id);
+        domain_crash_synchronous();
+    }
+
+    HVM_DBG_LOG(DBG_LEVEL_1, "eport: %x", iopacket_port(v));
+
+    clear_bit(iopacket_port(v),
+              &v->domain->shared_info->evtchn_mask[0]);
+
     __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
 
     error |= __vmwrite(GUEST_CR0, cr0);
diff -r 175ad739d8bc -r b5bb9920bf48 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Feb 22 20:52:30 2006
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Feb 23 10:22:25 2006
@@ -448,7 +448,6 @@
     return 0;                   /* dummy */
 }
 
-extern long evtchn_send(int lport);
 void do_nmi(struct cpu_user_regs *);
 
 static int check_vmx_controls(ctrls, msr)
diff -r 175ad739d8bc -r b5bb9920bf48 xen/common/event_channel.c
--- a/xen/common/event_channel.c        Wed Feb 22 20:52:30 2006
+++ b/xen/common/event_channel.c        Thu Feb 23 10:22:25 2006
@@ -399,7 +399,7 @@
 }
 
 
-long evtchn_send(int lport)
+long evtchn_send(unsigned int lport)
 {
     struct evtchn *lchn, *rchn;
     struct domain *ld = current->domain, *rd;
@@ -508,15 +508,13 @@
     return rc;
 }
 
-static long evtchn_bind_vcpu(evtchn_bind_vcpu_t *bind) 
-{
-    struct domain *d    = current->domain;
-    int            port = bind->port;
-    int            vcpu = bind->vcpu;
+long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
+{
+    struct domain *d = current->domain;
     struct evtchn *chn;
     long           rc = 0;
 
-    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
+    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
         return -ENOENT;
 
     spin_lock(&d->evtchn_lock);
@@ -533,7 +531,7 @@
     case ECS_UNBOUND:
     case ECS_INTERDOMAIN:
     case ECS_PIRQ:
-        chn->notify_vcpu_id = vcpu;
+        chn->notify_vcpu_id = vcpu_id;
         break;
     default:
         rc = -EINVAL;
@@ -638,7 +636,7 @@
         break;
 
     case EVTCHNOP_bind_vcpu:
-        rc = evtchn_bind_vcpu(&op.u.bind_vcpu);
+        rc = evtchn_bind_vcpu(op.u.bind_vcpu.port, op.u.bind_vcpu.vcpu);
         break;
 
     case EVTCHNOP_unmask:
diff -r 175ad739d8bc -r b5bb9920bf48 xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h      Wed Feb 22 20:52:30 2006
+++ b/xen/include/asm-x86/hvm/io.h      Thu Feb 23 10:22:25 2006
@@ -23,6 +23,7 @@
 #include <asm/hvm/vpic.h>
 #include <asm/hvm/vioapic.h>
 #include <public/hvm/ioreq.h>
+#include <public/event_channel.h>
 
 #define MAX_OPERAND_NUM 2
 
diff -r 175ad739d8bc -r b5bb9920bf48 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Feb 22 20:52:30 2006
+++ b/xen/include/asm-x86/hvm/support.h Thu Feb 23 10:22:25 2006
@@ -40,9 +40,9 @@
     return &get_sp(d)->vcpu_iodata[cpu];
 }
 
-static inline int iopacket_port(struct domain *d)
+static inline int iopacket_port(struct vcpu *v)
 {
-    return get_sp(d)->sp_global.eport;
+    return get_vio(v->domain, v->vcpu_id)->vp_eport;
 }
 
 /* XXX these are really VMX specific */
diff -r 175ad739d8bc -r b5bb9920bf48 xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h    Wed Feb 22 20:52:30 2006
+++ b/xen/include/public/hvm/ioreq.h    Thu Feb 23 10:22:25 2006
@@ -53,6 +53,7 @@
     uint8_t dir:1;          /*  1=read, 0=write             */
     uint8_t df:1;
     uint8_t type;           /* I/O type                     */
+    uint64_t io_count;      /* Number of IOs issued on this vcpu */
 } ioreq_t;
 
 #define MAX_VECTOR      256
@@ -65,11 +66,13 @@
     uint16_t    pic_irr;
     uint16_t    pic_last_irr;
     uint16_t    pic_clear_irr;
-    int         eport; /* Event channel port */
 } global_iodata_t;
 
 typedef struct {
-    ioreq_t     vp_ioreq;
+    ioreq_t         vp_ioreq;
+    /* Event channel port */
+    unsigned long   vp_eport;   /* VMX vcpu uses this to notify DM */
+    unsigned long   dm_eport;   /* DM uses this to notify VMX vcpu */
 } vcpu_iodata_t;
 
 typedef struct {
diff -r 175ad739d8bc -r b5bb9920bf48 xen/include/xen/event.h
--- a/xen/include/xen/event.h   Wed Feb 22 20:52:30 2006
+++ b/xen/include/xen/event.h   Thu Feb 23 10:22:25 2006
@@ -63,4 +63,10 @@
     (!!(v)->vcpu_info->evtchn_upcall_pending &  \
       !(v)->vcpu_info->evtchn_upcall_mask)
 
+/* Send a notification from a local event-channel port. */
+extern long evtchn_send(unsigned int lport);
+
+/* Bind a local event-channel port to the specified VCPU. */
+extern long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id);
+
 #endif /* __XEN_EVENT_H__ */
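
On the hypervisor side the resulting send path is: a vcpu fills in its
own vp_ioreq slot, bumps the new io_count statistic, marks the request
STATE_IOREQ_READY, and signals its private vp_eport rather than a
per-domain port. A compact standalone sketch of that sequence follows
(again illustrative; fake_evtchn_send() stands in for the real
evtchn_send(), and the structs are trimmed to the fields used):

#include <stdio.h>
#include <stdint.h>

#define STATE_IOREQ_READY 1           /* invented value; only the name matters here */

typedef struct {
    int      state;                   /* STATE_IOREQ_* */
    uint64_t io_count;                /* new per-vcpu IO counter */
} ioreq_t;

typedef struct {
    ioreq_t       vp_ioreq;
    unsigned long vp_eport;           /* vcpu -> DM notification port */
    unsigned long dm_eport;           /* DM -> vcpu notification port */
} vcpu_iodata_t;

/* Stand-in for evtchn_send(); the real one lives in event_channel.c. */
static void fake_evtchn_send(unsigned long port)
{
    printf("notify device model via port %lu\n", port);
}

/* Analogue of the tail of send_mmio_req() for one vcpu's slot. */
static void send_io_req(vcpu_iodata_t *vio)
{
    ioreq_t *p = &vio->vp_ioreq;

    p->io_count++;                    /* per-vcpu IO statistic */
    p->state = STATE_IOREQ_READY;
    fake_evtchn_send(vio->vp_eport);  /* per-vcpu port, was per-domain */
}

int main(void)
{
    vcpu_iodata_t vio = { .vp_eport = 101 };    /* invented port number */

    send_io_req(&vio);
    printf("io_count is now %llu\n",
           (unsigned long long)vio.vp_ioreq.io_count);
    return 0;
}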

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
