xen-changelog

[Xen-changelog] Big simplification of the Xen event-channel interface.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Big simplification of the Xen event-channel interface.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 07 Oct 2005 10:02:10 +0000
Delivery-date: Fri, 07 Oct 2005 09:59:40 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 37bea65ed6ca2faff4bf5f1b281b6cdc9df3522d
# Parent  48df3efaf61c2cf98dc87ab1b70bd505f0096a6a
Big simplification of the Xen event-channel interface.
EVTCHNOP_bind_interdomain in particular is much simpler.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
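
To make the new shape of the interface concrete, here is a minimal
sketch (not part of the changeset) of how a frontend-style driver now
binds to a peer's unbound port. The helper name and its arguments are
illustrative; only fields and calls that appear in the diff below are
used, and error handling is elided:

/* Hypothetical helper -- mirrors the blkback/netback pattern below. */
static int bind_to_remote(domid_t remote_dom, u32 remote_port)
{
        /* Caller names only the remote end; Xen picks the local port. */
        evtchn_op_t op = {
                .cmd = EVTCHNOP_bind_interdomain,
                .u.bind_interdomain.remote_dom  = remote_dom,
                .u.bind_interdomain.remote_port = remote_port };

        if (HYPERVISOR_event_channel_op(&op) != 0)
                return -1;

        /* A single OUT field replaces the old dom1/dom2, port1/port2. */
        return op.u.bind_interdomain.local_port;
}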

diff -r 48df3efaf61c -r 37bea65ed6ca linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Thu Oct  6 23:21:00 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c     Fri Oct  7 09:57:13 2005
@@ -219,7 +219,6 @@
        spin_lock(&irq_mapping_update_lock);
 
        if (--irq_bindcount[irq] == 0) {
-               op.u.close.dom  = DOMID_SELF;
                op.u.close.port = evtchn;
                BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
 
@@ -283,7 +282,6 @@
        spin_lock(&irq_mapping_update_lock);
 
        if (--irq_bindcount[irq] == 0) {
-               op.u.close.dom  = DOMID_SELF;
                op.u.close.port = evtchn;
                BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
 
@@ -326,7 +324,6 @@
        spin_lock(&irq_mapping_update_lock);
 
        if ((--irq_bindcount[irq] == 0) && (evtchn != -1)) {
-               op.u.close.dom  = DOMID_SELF;
                op.u.close.port = evtchn;
                BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
 
@@ -513,7 +510,10 @@
 static unsigned int startup_pirq(unsigned int irq)
 {
        evtchn_op_t op = { .cmd = EVTCHNOP_bind_pirq };
-       int evtchn;
+       int evtchn = irq_to_evtchn[irq];
+
+       if (VALID_EVTCHN(evtchn))
+               goto out;
 
        op.u.bind_pirq.pirq  = irq;
        /* NB. We are happy to share unless we are probing. */
@@ -532,6 +532,7 @@
        evtchn_to_irq[evtchn] = irq;
        irq_to_evtchn[irq]    = evtchn;
 
+ out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 
@@ -548,7 +549,6 @@
 
        mask_evtchn(evtchn);
 
-       op.u.close.dom  = DOMID_SELF;
        op.u.close.port = evtchn;
        BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
 
diff -r 48df3efaf61c -r 37bea65ed6ca linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c      Thu Oct  6 23:21:00 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c      Fri Oct  7 09:57:13 2005
@@ -71,10 +71,8 @@
        int err;
        evtchn_op_t op = {
                .cmd = EVTCHNOP_bind_interdomain,
-               .u.bind_interdomain.dom1 = DOMID_SELF,
-               .u.bind_interdomain.dom2 = blkif->domid,
-               .u.bind_interdomain.port1 = 0,
-               .u.bind_interdomain.port2 = evtchn };
+               .u.bind_interdomain.remote_dom = blkif->domid,
+               .u.bind_interdomain.remote_port = evtchn };
 
        if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
                return -ENOMEM;
@@ -92,7 +90,7 @@
                return err;
        }
 
-       blkif->evtchn = op.u.bind_interdomain.port1;
+       blkif->evtchn = op.u.bind_interdomain.local_port;
 
        sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
        SHARED_RING_INIT(sring);
diff -r 48df3efaf61c -r 37bea65ed6ca linux-2.6-xen-sparse/drivers/xen/blktap/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c       Thu Oct  6 23:21:00 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c       Fri Oct  7 09:57:13 2005
@@ -71,10 +71,8 @@
        int err;
        evtchn_op_t op = {
                .cmd = EVTCHNOP_bind_interdomain,
-               .u.bind_interdomain.dom1 = DOMID_SELF,
-               .u.bind_interdomain.dom2 = blkif->domid,
-               .u.bind_interdomain.port1 = 0,
-               .u.bind_interdomain.port2 = evtchn };
+               .u.bind_interdomain.remote_dom  = blkif->domid,
+               .u.bind_interdomain.remote_port = evtchn };
 
        if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
                return -ENOMEM;
@@ -92,7 +90,7 @@
                return err;
        }
 
-       blkif->evtchn = op.u.bind_interdomain.port1;
+       blkif->evtchn = op.u.bind_interdomain.local_port;
 
        sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
        SHARED_RING_INIT(sring);
diff -r 48df3efaf61c -r 37bea65ed6ca linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c
--- a/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c  Thu Oct  6 23:21:00 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c  Fri Oct  7 09:57:13 2005
@@ -223,7 +223,7 @@
                if (copy_from_user(&bind, (void *)arg, sizeof(bind)))
                        break;
 
-               op.cmd              = EVTCHNOP_bind_virq;
+               op.cmd = EVTCHNOP_bind_virq;
                op.u.bind_virq.virq = bind.virq;
                op.u.bind_virq.vcpu = 0;
                rc = HYPERVISOR_event_channel_op(&op);
@@ -243,16 +243,14 @@
                if (copy_from_user(&bind, (void *)arg, sizeof(bind)))
                        break;
 
-               op.cmd                      = EVTCHNOP_bind_interdomain;
-               op.u.bind_interdomain.dom1  = DOMID_SELF;
-               op.u.bind_interdomain.dom2  = bind.remote_domain;
-               op.u.bind_interdomain.port1 = 0;
-               op.u.bind_interdomain.port2 = bind.remote_port;
+               op.cmd = EVTCHNOP_bind_interdomain;
+               op.u.bind_interdomain.remote_dom  = bind.remote_domain;
+               op.u.bind_interdomain.remote_port = bind.remote_port;
                rc = HYPERVISOR_event_channel_op(&op);
                if (rc != 0)
                        break;
 
-               rc = op.u.bind_interdomain.port1;
+               rc = op.u.bind_interdomain.local_port;
                port_user[rc] = u;
                unmask_evtchn(rc);
                break;
@@ -265,7 +263,7 @@
                if (copy_from_user(&bind, (void *)arg, sizeof(bind)))
                        break;
 
-               op.cmd                        = EVTCHNOP_alloc_unbound;
+               op.cmd = EVTCHNOP_alloc_unbound;
                op.u.alloc_unbound.dom        = DOMID_SELF;
                op.u.alloc_unbound.remote_dom = bind.remote_domain;
                rc = HYPERVISOR_event_channel_op(&op);
@@ -292,6 +290,11 @@
                } else {
                        port_user[unbind.port] = NULL;
                        mask_evtchn(unbind.port);
+
+                       op.cmd = EVTCHNOP_close;
+                       op.u.close.port = unbind.port;
+                       BUG_ON(HYPERVISOR_event_channel_op(&op));
+
                        rc = 0;
                }
                break;
@@ -390,8 +393,7 @@
                port_user[i] = NULL;
                mask_evtchn(i);
 
-               op.cmd          = EVTCHNOP_close;
-               op.u.close.dom  = DOMID_SELF;
+               op.cmd = EVTCHNOP_close;
                op.u.close.port = i;
                BUG_ON(HYPERVISOR_event_channel_op(&op));
        }
diff -r 48df3efaf61c -r 37bea65ed6ca linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Thu Oct  6 23:21:00 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Fri Oct  7 09:57:13 2005
@@ -180,10 +180,8 @@
        int err;
        evtchn_op_t op = {
                .cmd = EVTCHNOP_bind_interdomain,
-               .u.bind_interdomain.dom1 = DOMID_SELF,
-               .u.bind_interdomain.dom2 = netif->domid,
-               .u.bind_interdomain.port1 = 0,
-               .u.bind_interdomain.port2 = evtchn };
+               .u.bind_interdomain.remote_dom = netif->domid,
+               .u.bind_interdomain.remote_port = evtchn };
 
        netif->comms_area = alloc_vm_area(2*PAGE_SIZE);
        if (netif->comms_area == NULL)
@@ -202,7 +200,7 @@
                return err;
        }
 
-       netif->evtchn = op.u.bind_interdomain.port1;
+       netif->evtchn = op.u.bind_interdomain.local_port;
 
        netif->irq = bind_evtchn_to_irqhandler(
                netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
diff -r 48df3efaf61c -r 37bea65ed6ca linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c      Thu Oct  6 23:21:00 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c      Fri Oct  7 09:57:13 2005
@@ -120,10 +120,8 @@
        int err;
        evtchn_op_t op = {
                .cmd = EVTCHNOP_bind_interdomain,
-               .u.bind_interdomain.dom1 = DOMID_SELF,
-               .u.bind_interdomain.dom2 = tpmif->domid,
-               .u.bind_interdomain.port1 = 0,
-               .u.bind_interdomain.port2 = evtchn };
+               .u.bind_interdomain.remote_dom = tpmif->domid,
+               .u.bind_interdomain.remote_port = evtchn };
 
        if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
                return -ENOMEM;
@@ -141,7 +139,7 @@
                return err;
        }
 
-       tpmif->evtchn = op.u.bind_interdomain.port1;
+       tpmif->evtchn = op.u.bind_interdomain.local_port;
 
        tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
 
diff -r 48df3efaf61c -r 37bea65ed6ca linux-2.6-xen-sparse/include/asm-xen/evtchn.h
--- a/linux-2.6-xen-sparse/include/asm-xen/evtchn.h     Thu Oct  6 23:21:00 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/evtchn.h     Fri Oct  7 09:57:13 2005
@@ -123,9 +123,9 @@
 
 static inline void notify_remote_via_evtchn(int port)
 {
-       evtchn_op_t op = {
-               .cmd = EVTCHNOP_send,
-               .u.send.local_port = port };
+       evtchn_op_t op;
+       op.cmd         = EVTCHNOP_send,
+       op.u.send.port = port;
        (void)HYPERVISOR_event_channel_op(&op);
 }
 
diff -r 48df3efaf61c -r 37bea65ed6ca tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Thu Oct  6 23:21:00 2005
+++ b/tools/ioemu/vl.c  Fri Oct  7 09:57:13 2005
@@ -2806,7 +2806,7 @@
 
             case QEMU_OPTION_p:
                 {
-                  extern short ioreq_remote_port;
+                  extern u16 ioreq_remote_port;
                   ioreq_remote_port = atoi(optarg);
                   printf("port: %d\n", ioreq_remote_port);
                 }
diff -r 48df3efaf61c -r 37bea65ed6ca tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Thu Oct  6 23:21:00 2005
+++ b/tools/python/xen/lowlevel/xc/xc.c Fri Oct  7 09:57:13 2005
@@ -432,13 +432,13 @@
 {
     XcObject *xc = (XcObject *)self;
 
-    u32 dom = DOMID_SELF, remote_dom;
+    u32 dom, remote_dom;
     int port;
 
-    static char *kwd_list[] = { "remote_dom", "dom", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
-                                      &remote_dom, &dom) )
+    static char *kwd_list[] = { "dom", "remote_dom", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list,
+                                      &dom, &remote_dom) )
         return NULL;
 
     if ( (port = xc_evtchn_alloc_unbound(xc->xc_handle, dom, remote_dom)) < 0 )
@@ -943,8 +943,9 @@
     { "evtchn_alloc_unbound", 
       (PyCFunction)pyxc_evtchn_alloc_unbound,
       METH_VARARGS | METH_KEYWORDS, "\n"
-      "Allocate an unbound local port that will await a remote connection.\n"
-      " dom [int]: Remote domain to accept connections from.\n\n"
+      "Allocate an unbound port that will await a remote connection.\n"
+      " dom        [int]: Domain whose port space to allocate from.\n"
+      " remote_dom [int]: Remote domain to accept connections from.\n\n"
       "Returns: [int] Unbound event-channel port.\n" },
 
     { "evtchn_status", 
diff -r 48df3efaf61c -r 37bea65ed6ca tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Thu Oct  6 23:21:00 2005
+++ b/tools/python/xen/xend/XendDomainInfo.py   Fri Oct  7 09:57:13 2005
@@ -197,6 +197,7 @@
         log.info("Recreating domain %d with new UUID %s.", domid, uuid)
 
         vm = XendDomainInfo(uuid, xeninfo, domid, True)
+        vm.removeDom()
         vm.storeVmDetails()
         vm.storeDomDetails()
 
diff -r 48df3efaf61c -r 37bea65ed6ca xen/common/event_channel.c
--- a/xen/common/event_channel.c        Thu Oct  6 23:21:00 2005
+++ b/xen/common/event_channel.c        Fri Oct  7 09:57:13 2005
@@ -70,7 +70,7 @@
 {
     struct evtchn *chn;
     struct domain *d;
-    int            port = alloc->port;
+    int            port;
     domid_t        dom = alloc->dom;
     long           rc = 0;
 
@@ -84,176 +84,80 @@
 
     spin_lock(&d->evtchn_lock);
 
-    /* Obtain, or ensure that we already have, a valid <port>. */
-    if ( port == 0 )
-    {
-        if ( (port = get_free_port(d)) < 0 )
-            ERROR_EXIT(port);
-    }
-    else if ( !port_is_valid(d, port) )
+    if ( (port = get_free_port(d)) < 0 )
+        ERROR_EXIT(port);
+    chn = evtchn_from_port(d, port);
+
+    chn->state = ECS_UNBOUND;
+    chn->u.unbound.remote_domid = alloc->remote_dom;
+
+    alloc->port = port;
+
+ out:
+    spin_unlock(&d->evtchn_lock);
+
+    put_domain(d);
+
+    return rc;
+}
+
+
+static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
+{
+    struct evtchn *lchn, *rchn;
+    struct domain *ld = current->domain, *rd;
+    int            lport, rport = bind->remote_port;
+    long           rc = 0;
+
+    if ( (rd = find_domain_by_id(bind->remote_dom)) == NULL )
+        return -ESRCH;
+
+    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
+    if ( ld < rd )
+    {
+        spin_lock(&ld->evtchn_lock);
+        spin_lock(&rd->evtchn_lock);
+    }
+    else
+    {
+        if ( ld != rd )
+            spin_lock(&rd->evtchn_lock);
+        spin_lock(&ld->evtchn_lock);
+    }
+
+    if ( (lport = get_free_port(ld)) < 0 )
+        ERROR_EXIT(lport);
+    lchn = evtchn_from_port(ld, lport);
+
+    if ( !port_is_valid(rd, rport) )
         ERROR_EXIT(-EINVAL);
-    chn = evtchn_from_port(d, port);
-
-    /* Validate channel's current state. */
-    switch ( chn->state )
-    {
-    case ECS_FREE:
-        chn->state = ECS_UNBOUND;
-        chn->u.unbound.remote_domid = alloc->remote_dom;
-        break;
-
-    case ECS_UNBOUND:
-        if ( chn->u.unbound.remote_domid != alloc->remote_dom )
-            ERROR_EXIT(-EINVAL);
-        break;
-
-    default:
+    rchn = evtchn_from_port(rd, rport);
+    if ( (rchn->state != ECS_UNBOUND) ||
+         (rchn->u.unbound.remote_domid != ld->domain_id) )
         ERROR_EXIT(-EINVAL);
-    }
-
- out:
-    spin_unlock(&d->evtchn_lock);
-
-    put_domain(d);
-
-    alloc->port = port;
-
-    return rc;
-}
-
-
-static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
-{
-    struct evtchn *chn1, *chn2;
-    struct domain *d1, *d2;
-    int            port1 = bind->port1, port2 = bind->port2;
-    domid_t        dom1 = bind->dom1, dom2 = bind->dom2;
-    long           rc = 0;
-
-    if ( !IS_PRIV(current->domain) && (dom1 != DOMID_SELF) )
-        return -EPERM;
-
-    if ( dom1 == DOMID_SELF )
-        dom1 = current->domain->domain_id;
-    if ( dom2 == DOMID_SELF )
-        dom2 = current->domain->domain_id;
-
-    if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
-         ((d2 = find_domain_by_id(dom2)) == NULL) )
-    {
-        if ( d1 != NULL )
-            put_domain(d1);
-        return -ESRCH;
-    }
-
-    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
-    if ( d1 < d2 )
-    {
-        spin_lock(&d1->evtchn_lock);
-        spin_lock(&d2->evtchn_lock);
-    }
-    else
-    {
-        if ( d1 != d2 )
-            spin_lock(&d2->evtchn_lock);
-        spin_lock(&d1->evtchn_lock);
-    }
-
-    /* Obtain, or ensure that we already have, a valid <port1>. */
-    if ( port1 == 0 )
-    {
-        if ( (port1 = get_free_port(d1)) < 0 )
-            ERROR_EXIT(port1);
-    }
-    else if ( !port_is_valid(d1, port1) )
-        ERROR_EXIT(-EINVAL);
-    chn1 = evtchn_from_port(d1, port1);
-
-    /* Obtain, or ensure that we already have, a valid <port2>. */
-    if ( port2 == 0 )
-    {
-        /* Make port1 non-free while we allocate port2 (in case dom1==dom2). */
-        u16 state = chn1->state;
-        chn1->state = ECS_INTERDOMAIN;
-        port2 = get_free_port(d2);
-        chn1->state = state;
-        if ( port2 < 0 )
-            ERROR_EXIT(port2);
-    }
-    else if ( !port_is_valid(d2, port2) )
-        ERROR_EXIT(-EINVAL);
-    chn2 = evtchn_from_port(d2, port2);
-
-    /* Validate <dom1,port1>'s current state. */
-    switch ( chn1->state )
-    {
-    case ECS_FREE:
-        break;
-
-    case ECS_UNBOUND:
-        if ( chn1->u.unbound.remote_domid != dom2 )
-            ERROR_EXIT(-EINVAL);
-        break;
-
-    case ECS_INTERDOMAIN:
-        if ( chn1->u.interdomain.remote_dom != d2 )
-            ERROR_EXIT(-EINVAL);
-        if ( (chn1->u.interdomain.remote_port != port2) && (bind->port2 != 0) )
-            ERROR_EXIT(-EINVAL);
-        port2 = chn1->u.interdomain.remote_port;
-        goto out;
-
-    default:
-        ERROR_EXIT(-EINVAL);
-    }
-
-    /* Validate <dom2,port2>'s current state. */
-    switch ( chn2->state )
-    {
-    case ECS_FREE:
-        if ( !IS_PRIV(current->domain) && (dom2 != DOMID_SELF) )
-            ERROR_EXIT(-EPERM);
-        break;
-
-    case ECS_UNBOUND:
-        if ( chn2->u.unbound.remote_domid != dom1 )
-            ERROR_EXIT(-EINVAL);
-        break;
-
-    case ECS_INTERDOMAIN:
-        if ( chn2->u.interdomain.remote_dom != d1 )
-            ERROR_EXIT(-EINVAL);
-        if ( (chn2->u.interdomain.remote_port != port1) && (bind->port1 != 0) )
-            ERROR_EXIT(-EINVAL);
-        port1 = chn2->u.interdomain.remote_port;
-        goto out;
-
-    default:
-        ERROR_EXIT(-EINVAL);
-    }
+
+    lchn->u.interdomain.remote_dom  = rd;
+    lchn->u.interdomain.remote_port = (u16)rport;
+    lchn->state                     = ECS_INTERDOMAIN;
+    
+    rchn->u.interdomain.remote_dom  = ld;
+    rchn->u.interdomain.remote_port = (u16)lport;
+    rchn->state                     = ECS_INTERDOMAIN;
 
     /*
-     * Everything checked out okay -- bind <dom1,port1> to <dom2,port2>.
+     * We may have lost notifications on the remote unbound port. Fix that up
+     * here by conservatively always setting a notification on the local port.
      */
-
-    chn1->u.interdomain.remote_dom  = d2;
-    chn1->u.interdomain.remote_port = (u16)port2;
-    chn1->state                     = ECS_INTERDOMAIN;
+    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
+
+    bind->local_port = lport;
+
+ out:
+    spin_unlock(&ld->evtchn_lock);
+    if ( ld != rd )
+        spin_unlock(&rd->evtchn_lock);
     
-    chn2->u.interdomain.remote_dom  = d1;
-    chn2->u.interdomain.remote_port = (u16)port1;
-    chn2->state                     = ECS_INTERDOMAIN;
-
- out:
-    spin_unlock(&d1->evtchn_lock);
-    if ( d1 != d2 )
-        spin_unlock(&d2->evtchn_lock);
-    
-    put_domain(d1);
-    put_domain(d2);
-
-    bind->port1 = port1;
-    bind->port2 = port2;
+    put_domain(rd);
 
     return rc;
 }
@@ -264,39 +168,34 @@
     struct evtchn *chn;
     struct vcpu   *v;
     struct domain *d = current->domain;
-    int            port, virq = bind->virq;
+    int            port, virq = bind->virq, vcpu = bind->vcpu;
+    long           rc = 0;
 
     if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) )
         return -EINVAL;
 
-    if ( (v = d->vcpu[bind->vcpu]) == NULL )
+    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || ((v = d->vcpu[vcpu]) == NULL) )
         return -ENOENT;
 
     spin_lock(&d->evtchn_lock);
 
-    /*
-     * Port 0 is the fallback port for VIRQs that haven't been explicitly
-     * bound yet.
-     */
-    if ( ((port = v->virq_to_evtchn[virq]) != 0) ||
-         ((port = get_free_port(d)) < 0) )
-        goto out;
+    if ( v->virq_to_evtchn[virq] != 0 )
+        ERROR_EXIT(-EEXIST);
+
+    if ( (port = get_free_port(d)) < 0 )
+        ERROR_EXIT(port);
 
     chn = evtchn_from_port(d, port);
     chn->state          = ECS_VIRQ;
-    chn->notify_vcpu_id = v->vcpu_id;
+    chn->notify_vcpu_id = vcpu;
     chn->u.virq         = virq;
 
-    v->virq_to_evtchn[virq] = port;
+    v->virq_to_evtchn[virq] = bind->port = port;
 
  out:
     spin_unlock(&d->evtchn_lock);
 
-    if ( port < 0 )
-        return port;
-
-    bind->port = port;
-    return 0;
+    return rc;
 }
 
 
@@ -304,27 +203,27 @@
 {
     struct evtchn *chn;
     struct domain *d = current->domain;
-    int            port;
-
-    if ( d->vcpu[bind->vcpu] == NULL )
+    int            port, vcpu = bind->vcpu;
+    long           rc = 0;
+
+    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
         return -ENOENT;
 
     spin_lock(&d->evtchn_lock);
 
-    if ( (port = get_free_port(d)) >= 0 )
-    {
-        chn = evtchn_from_port(d, port);
-        chn->state          = ECS_IPI;
-        chn->notify_vcpu_id = bind->vcpu;
-    }
-
+    if ( (port = get_free_port(d)) < 0 )
+        ERROR_EXIT(port);
+
+    chn = evtchn_from_port(d, port);
+    chn->state          = ECS_IPI;
+    chn->notify_vcpu_id = vcpu;
+
+    bind->port = port;
+
+ out:
     spin_unlock(&d->evtchn_lock);
 
-    if ( port < 0 )
-        return port;
-
-    bind->port = port;
-    return 0;
+    return rc;
 }
 
 
@@ -332,16 +231,19 @@
 {
     struct evtchn *chn;
     struct domain *d = current->domain;
-    int            port, rc, pirq = bind->pirq;
+    int            port, pirq = bind->pirq;
+    long           rc;
 
     if ( pirq >= ARRAY_SIZE(d->pirq_to_evtchn) )
         return -EINVAL;
 
     spin_lock(&d->evtchn_lock);
 
-    if ( ((rc = port = d->pirq_to_evtchn[pirq]) != 0) ||
-         ((rc = port = get_free_port(d)) < 0) )
-        goto out;
+    if ( d->pirq_to_evtchn[pirq] != 0 )
+        ERROR_EXIT(-EEXIST);
+
+    if ( (port = get_free_port(d)) < 0 )
+        ERROR_EXIT(port);
 
     chn = evtchn_from_port(d, port);
 
@@ -357,14 +259,12 @@
     chn->state  = ECS_PIRQ;
     chn->u.pirq = pirq;
 
+    bind->port = port;
+
  out:
     spin_unlock(&d->evtchn_lock);
 
-    if ( rc < 0 )
-        return rc;
-
-    bind->port = port;
-    return 0;
+    return rc;
 }
 
 
@@ -478,22 +378,7 @@
 
 static long evtchn_close(evtchn_close_t *close)
 {
-    struct domain *d;
-    long           rc;
-    domid_t        dom = close->dom;
-
-    if ( dom == DOMID_SELF )
-        dom = current->domain->domain_id;
-    else if ( !IS_PRIV(current->domain) )
-        return -EPERM;
-
-    if ( (d = find_domain_by_id(dom)) == NULL )
-        return -ESRCH;
-
-    rc = __evtchn_close(d, close->port);
-
-    put_domain(d);
-    return rc;
+    return __evtchn_close(current->domain, close->port);
 }
 
 
@@ -523,6 +408,9 @@
     case ECS_IPI:
         evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
         break;
+    case ECS_UNBOUND:
+        /* silently drop the notification */
+        break;
     default:
         ret = -EINVAL;
     }
@@ -611,9 +499,8 @@
     struct evtchn *chn;
     long           rc = 0;
 
-    if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] == NULL) ) {
-        return -EINVAL;
-    }
+    if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu] == NULL) )
+        return -ENOENT;
 
     spin_lock(&d->evtchn_lock);
 
@@ -689,7 +576,7 @@
         break;
 
     case EVTCHNOP_send:
-        rc = evtchn_send(op.u.send.local_port);
+        rc = evtchn_send(op.u.send.port);
         break;
 
     case EVTCHNOP_status:
diff -r 48df3efaf61c -r 37bea65ed6ca xen/include/public/event_channel.h
--- a/xen/include/public/event_channel.h        Thu Oct  6 23:21:00 2005
+++ b/xen/include/public/event_channel.h        Fri Oct  7 09:57:13 2005
@@ -10,9 +10,9 @@
 #define __XEN_PUBLIC_EVENT_CHANNEL_H__
 
 /*
- * EVTCHNOP_alloc_unbound: Allocate a port in <dom> for later binding to
- * <remote_dom>. <port> may be wildcarded by setting to zero, in which case a
- * fresh port will be allocated, and the field filled in on return.
+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
+ * is allocated in <dom> and returned as <port>.
  * NOTES:
  *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
  */
@@ -20,36 +20,24 @@
 typedef struct evtchn_alloc_unbound {
     /* IN parameters */
     domid_t dom, remote_dom;
-    /* IN/OUT parameters */
+    /* OUT parameters */
     u32     port;
 } evtchn_alloc_unbound_t;
 
 /*
  * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
- * <dom1> and <dom2>. Either <port1> or <port2> may be wildcarded by setting to
- * zero. On successful return both <port1> and <port2> are filled in and
- * <dom1,port1> is fully bound to <dom2,port2>.
- * 
- * NOTES:
- *  1. A wildcarded port is allocated from the relevant domain's free list
- *     (i.e., some port that was previously EVTCHNSTAT_closed). However, if the
- *     remote port pair is already fully bound then a port is not allocated,
- *     and instead the existing local port is returned to the caller.
- *  2. If the caller is unprivileged then <dom1> must be DOMID_SELF.
- *  3. If the caller is unprivileged and <dom2,port2> is EVTCHNSTAT_closed
- *     then <dom2> must be DOMID_SELF.
- *  4. If either port is already bound then it must be bound to the other
- *     specified domain and port (if not wildcarded).
- *  5. If either port is awaiting binding (EVTCHNSTAT_unbound) then it must
- *     be awaiting binding to the other domain, and the other port pair must
- *     be closed or unbound.
+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
+ * a port that is unbound and marked as accepting bindings from the calling
+ * domain. A fresh port is allocated in the calling domain and returned as
+ * <local_port>.
  */
 #define EVTCHNOP_bind_interdomain 0
 typedef struct evtchn_bind_interdomain {
     /* IN parameters. */
-    domid_t dom1, dom2;
-    /* IN/OUT parameters. */
-    u32     port1, port2;
+    domid_t remote_dom;
+    u32     remote_port;
+    /* OUT parameters. */
+    u32     local_port;
 } evtchn_bind_interdomain_t;
 
 /*
@@ -99,31 +87,24 @@
 } evtchn_bind_ipi_t;
 
 /*
- * EVTCHNOP_close: Close the communication channel which has an endpoint at
- * <dom, port>. If the channel is interdomain then the remote end is placed in
- * the unbound state (EVTCHNSTAT_unbound), awaiting a new connection.
- * NOTES:
- *  1. <dom> may be specified as DOMID_SELF.
- *  2. Only a sufficiently-privileged domain may close an event channel
- *     for which <dom> is not DOMID_SELF.
+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
+ * interdomain then the remote end is placed in the unbound state
+ * (EVTCHNSTAT_unbound), awaiting a new connection.
  */
 #define EVTCHNOP_close            3
 typedef struct evtchn_close {
     /* IN parameters. */
-    domid_t dom;
-    u32     port;
-    /* No OUT parameters. */
+    u32 port;
 } evtchn_close_t;
 
 /*
  * EVTCHNOP_send: Send an event to the remote end of the channel whose local
- * endpoint is <DOMID_SELF, local_port>.
+ * endpoint is <port>.
  */
 #define EVTCHNOP_send             4
 typedef struct evtchn_send {
     /* IN parameters. */
-    u32     local_port;
-    /* No OUT parameters. */
+    u32 port;
 } evtchn_send_t;
 
 /*
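
For completeness, a sketch of the allocating side and of teardown under
the new interface (again illustrative, not taken from the patch;
error handling elided, and the Linux guest hypercall wrapper is
assumed). EVTCHNOP_alloc_unbound keeps its <dom> field, while
evtchn_close_t is now just a port:

/* Hypothetical helpers using only names defined in this header. */
static int alloc_unbound_port(domid_t remote_dom)
{
        evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
        op.u.alloc_unbound.dom        = DOMID_SELF;
        op.u.alloc_unbound.remote_dom = remote_dom;
        if (HYPERVISOR_event_channel_op(&op) != 0)
                return -1;
        return op.u.alloc_unbound.port;   /* OUT: freshly allocated port */
}

static void close_port(u32 port)
{
        /* No more <dom> field: a domain may only close its own ports. */
        evtchn_op_t op = { .cmd = EVTCHNOP_close, .u.close.port = port };
        (void)HYPERVISOR_event_channel_op(&op);
}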

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
