[Xen-devel] [PATCH 3/8] [libxc] Domain Groups: libxc handlers for group data

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 3/8] [libxc] Domain Groups: libxc handlers for group data
From: Chris <hap10@xxxxxxxxxxxxxx>
Date: Tue, 20 Feb 2007 14:56:06 -0500
Delivery-date: Tue, 20 Feb 2007 11:58:05 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Thunderbird 1.5.0.9 (Macintosh/20061207)
libxc:

Augment the libxc framework to shuttle domain-group data and commands
between xend and the VMM.
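
For reviewers, here is a minimal sketch of how a management tool might
drive the new handlers once the series is applied. It is illustrative
only: the domid and the zeroed group handle are placeholders, error
handling is minimal, and xen_domain_group_handle_t is assumed to be a
16-byte array like xen_domain_handle_t.

    /* Hypothetical caller of the new libxc group handlers. */
    #include <stdio.h>
    #include "xenctrl.h"

    int main(void)
    {
        int xc = xc_interface_open();   /* existing libxc entry point */
        uint32_t dgid;
        xen_domain_group_handle_t handle = { 0 };  /* caller-chosen bytes */

        if ( xc < 0 )
            return 1;

        if ( xc_domain_group_create(xc, handle, &dgid) == 0 )
        {
            xc_domain_group_join(xc, 5 /* placeholder domid */, dgid);
            xc_domain_group_pause(xc, dgid);    /* pauses every member */
            xc_domain_group_unpause(xc, dgid);
            printf("created group %u\n", dgid);
        }

        xc_interface_close(xc);
        return 0;
    }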




diff -r ecb6cd61a9cf tools/libxc/Makefile
--- a/tools/libxc/Makefile      Tue Feb 20 12:27:03 2007 +0000
+++ b/tools/libxc/Makefile      Tue Feb 20 12:59:11 2007 -0500
@@ -7,6 +7,7 @@ CTRL_SRCS-y       :=
 CTRL_SRCS-y       :=
 CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-y       += xc_domain.c
+CTRL_SRCS-y       += xc_domain_group.c
 CTRL_SRCS-y       += xc_evtchn.c
 CTRL_SRCS-y       += xc_misc.c
 CTRL_SRCS-y       += xc_acm.c   
diff -r ecb6cd61a9cf tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Tue Feb 20 12:27:03 2007 +0000
+++ b/tools/libxc/xc_domain.c   Tue Feb 20 12:59:11 2007 -0500
@@ -191,6 +191,7 @@ int xc_domain_getinfo(int xc_handle,
             info->crashed  = 1;
         }
 
+        info->grpid = domctl.u.getdomaininfo.group;
         info->ssidref  = domctl.u.getdomaininfo.ssidref;
         info->nr_pages = domctl.u.getdomaininfo.tot_pages;
         info->max_memkb = domctl.u.getdomaininfo.max_pages << (PAGE_SHIFT-10);
@@ -201,6 +202,9 @@ int xc_domain_getinfo(int xc_handle,
 
         memcpy(info->handle, domctl.u.getdomaininfo.handle,
                sizeof(xen_domain_handle_t));
+
+        memcpy(info->dg_handle, domctl.u.getdomaininfo.dg_handle,
+               sizeof(xen_domain_group_handle_t));
 
         next_domid = (uint16_t)domctl.domain + 1;
         info++;
diff -r ecb6cd61a9cf tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Tue Feb 20 12:27:03 2007 +0000
+++ b/tools/libxc/xc_private.h  Tue Feb 20 12:59:11 2007 -0500
@@ -23,10 +23,12 @@
 #ifdef VALGRIND
 #define DECLARE_HYPERCALL privcmd_hypercall_t hypercall = { 0 }
 #define DECLARE_DOMCTL struct xen_domctl domctl = { 0 }
+#define DECLARE_DOMGRPCTL struct xen_domgrpctl domgrpctl = { 0 }
 #define DECLARE_SYSCTL struct xen_sysctl sysctl = { 0 }
 #else
 #define DECLARE_HYPERCALL privcmd_hypercall_t hypercall
 #define DECLARE_DOMCTL struct xen_domctl domctl
+#define DECLARE_DOMGRPCTL struct xen_domgrpctl domgrpctl
 #define DECLARE_SYSCTL struct xen_sysctl sysctl
 #endif
 
@@ -118,6 +120,35 @@ static inline int do_domctl(int xc_handl
     return ret;
 }
 
+static inline int do_domgrpctl(int xc_handle, struct xen_domgrpctl *domgrpctl)
+{
+    int ret = -1;
+    DECLARE_HYPERCALL;
+
+    domgrpctl->interface_version = XEN_DOMGRPCTL_INTERFACE_VERSION;
+
+    hypercall.op     = __HYPERVISOR_domgrpctl;
+    hypercall.arg[0] = (unsigned long)domgrpctl;
+
+    if ( mlock(domgrpctl, sizeof(*domgrpctl)) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        goto out1;
+    }
+
+    if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
+    {
+        if ( errno == EACCES )
+            DPRINTF("domgrpctl operation failed -- need to"
+                    " rebuild the user-space tool set?\n");
+    }
+
+    safe_munlock(domgrpctl, sizeof(*domgrpctl));
+
+ out1:
+    return ret;
+}
+
 static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
 {
     int ret = -1;
diff -r ecb6cd61a9cf tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Tue Feb 20 12:27:03 2007 +0000
+++ b/tools/libxc/xenctrl.h     Tue Feb 20 12:59:11 2007 -0500
@@ -18,6 +18,7 @@
 #include <stdint.h>
 #include <xen/xen.h>
 #include <xen/domctl.h>
+#include <xen/domgrpctl.h>
 #include <xen/sysctl.h>
 #include <xen/version.h>
 #include <xen/event_channel.h>
@@ -147,6 +148,7 @@ int xc_waitdomain(
 
 typedef struct xc_dominfo {
     uint32_t      domid;
+    uint32_t      grpid;
     uint32_t      ssidref;
     unsigned int  dying:1, crashed:1, shutdown:1,
                   paused:1, blocked:1, running:1,
@@ -159,7 +161,15 @@ typedef struct xc_dominfo {
     unsigned int  nr_online_vcpus;
     unsigned int  max_vcpu_id;
     xen_domain_handle_t handle;
+    xen_domain_group_handle_t dg_handle;
 } xc_dominfo_t;
+
+typedef struct {
+    dgid_t        dgid;
+    uint16_t      size;
+    domid_t       member_list[MAX_GROUP_SIZE];
+    xen_domain_group_handle_t handle;
+} xc_grpinfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
 int xc_domain_create(int xc_handle,
@@ -292,6 +302,27 @@ int xc_domain_getinfo(int xc_handle,
                       unsigned int max_doms,
                       xc_dominfo_t *info);
 
+int xc_domain_group_getinfo(int xc_handle,
+                            uint32_t first_dgid,
+                            unsigned int max_grps,
+                            xc_grpinfo_t *info);
+
+int xc_domain_group_create(int xc_handle,
+                           xen_domain_group_handle_t handle,
+                           uint32_t *pdgid);
+
+int xc_domain_group_pause(int xc_handle,
+                          uint32_t dgid);
+
+int xc_domain_group_unpause(int xc_handle,
+                            uint32_t dgid);
+
+int xc_domain_group_destroy(int xc_handle,
+                            uint32_t dgid);
+
+int xc_domain_group_join(int xc_handle,
+                         uint32_t domid,
+                         uint32_t dgid);
 
 /**
  * This function will set the execution context for the specified vcpu.
diff -r ecb6cd61a9cf tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Tue Feb 20 12:27:03 2007 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Feb 20 12:59:11 2007 -0500
@@ -36,10 +36,6 @@ typedef struct {
     int xc_handle;
 } XcObject;
 
-
-static PyObject *dom_op(XcObject *self, PyObject *args,
-                        int (*fn)(int, uint32_t));
-
 static PyObject *pyxc_error_to_exception(void)
 {
     PyObject *pyerr;
@@ -59,6 +55,21 @@ static PyObject *pyxc_error_to_exception
     PyErr_SetObject(xc_error_obj, pyerr);
 
     return NULL;
+}
+
+static PyObject *xcop(XcObject *self, PyObject *args,
+                      int (*fn)(int, uint32_t))
+{
+    uint32_t id; /* used for both domid and grpid */
+
+    if (!PyArg_ParseTuple(args, "i", &id))
+        return NULL;
+
+    if (fn(self->xc_handle, id) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
 }
 
 static PyObject *pyxc_domain_dumpcore(XcObject *self, PyObject *args)
@@ -147,17 +158,17 @@ static PyObject *pyxc_domain_max_vcpus(X
 
 static PyObject *pyxc_domain_pause(XcObject *self, PyObject *args)
 {
-    return dom_op(self, args, xc_domain_pause);
+    return xcop(self, args, xc_domain_pause);
 }
 
 static PyObject *pyxc_domain_unpause(XcObject *self, PyObject *args)
 {
-    return dom_op(self, args, xc_domain_unpause);
+    return xcop(self, args, xc_domain_unpause);
 }
 
 static PyObject *pyxc_domain_destroy(XcObject *self, PyObject *args)
 {
-    return dom_op(self, args, xc_domain_destroy);
+    return xcop(self, args, xc_domain_destroy);
 }
 
 static PyObject *pyxc_domain_shutdown(XcObject *self, PyObject *args)
@@ -176,7 +187,7 @@ static PyObject *pyxc_domain_shutdown(Xc
 
 static PyObject *pyxc_domain_resume(XcObject *self, PyObject *args)
 {
-    return dom_op(self, args, xc_domain_resume);
+    return xcop(self, args, xc_domain_resume);
 }
 
 static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
@@ -295,11 +306,15 @@ static PyObject *pyxc_domain_getinfo(XcO
     for ( i = 0 ; i < nr_doms; i++ )
     {
         PyObject *pyhandle = PyList_New(sizeof(xen_domain_handle_t));
+        PyObject *pydg_handle = PyList_New(sizeof(xen_domain_group_handle_t));
         for ( j = 0; j < sizeof(xen_domain_handle_t); j++ )
             PyList_SetItem(pyhandle, j, PyInt_FromLong(info[i].handle[j]));
+        for ( j = 0; j < sizeof(xen_domain_group_handle_t); j++ )
+            PyList_SetItem(pydg_handle, j, PyInt_FromLong(info[i].dg_handle[j]));
         info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
-                                  ",s:l,s:L,s:l,s:i,s:i}",
+                                  ",s:i,s:l,s:L,s:l,s:i,s:i}",
                                   "domid",       info[i].domid,
+                                  "dgid",      info[i].grpid,
                                   "online_vcpus", info[i].nr_online_vcpus,
                                   "max_vcpu_id", info[i].max_vcpu_id,
                                   "hvm",       info[i].hvm,
@@ -315,6 +330,8 @@ static PyObject *pyxc_domain_getinfo(XcO
                                  "ssidref",   info[i].ssidref,
                                  "shutdown_reason", info[i].shutdown_reason);
         PyDict_SetItemString(info_dict, "handle", pyhandle);
+        PyDict_SetItemString(info_dict, "dg_handle", pydg_handle);
         Py_DECREF(pyhandle);
+        Py_DECREF(pydg_handle);
         PyList_SetItem(list, i, info_dict);
     }
@@ -936,21 +953,6 @@ static PyObject *pyxc_domain_set_time_of
     return zero;
 }
 
-static PyObject *dom_op(XcObject *self, PyObject *args,
-                        int (*fn)(int, uint32_t))
-{
-    uint32_t dom;
-
-    if (!PyArg_ParseTuple(args, "i", &dom))
-        return NULL;
-
-    if (fn(self->xc_handle, dom) != 0)
-        return pyxc_error_to_exception();
-
-    Py_INCREF(zero);
-    return zero;
-}
-
 #ifdef __powerpc__
 static PyObject *pyxc_alloc_real_mode_area(XcObject *self,
                                            PyObject *args,
@@ -1013,6 +1015,152 @@ static PyObject *pyxc_prose_build(XcObje
 }
 #endif /* powerpc */
 
+#define EXTRACT_DOM_LIST(list_name, dict)                              \
+        dom_list = PyList_New(0);                                      \
+        for ( j = 0; j < info[i].size; j++ ) {                         \
+            /* PyList_Append adds its own reference; drop ours */      \
+            PyObject *item = PyInt_FromLong(info[i].list_name[j]);     \
+            PyList_Append(dom_list, item);                             \
+            Py_DECREF(item);                                           \
+        }                                                              \
+        PyDict_SetItemString(dict, #list_name, dom_list);              \
+        Py_DECREF(dom_list);
+
+static PyObject *pyxc_domain_group_getinfo(XcObject *self,
+                                           PyObject *args,
+                                           PyObject *kwds)
+{
+    PyObject *list, *info_dict, *dom_list, *pyhandle;
+
+    uint32_t first_grp = 0;
+    /* max_grps is unrealistically large and causes a large heap allocation 
+       for the duration of this function that, in the vast majority of cases, 
+       will be very sparsely populated with information about real groups.  
+
+       Leaving this alone for now to keep an equal limit on max number of 
+       groups in both the VMM and the control stack. 
+
+       Could add a new case to the domain group control hypercall to return 
+       the current number of groups instead of assuming the worst case...
+    */
+    int max_grps = NULL_GROUP_ID+1, nr_grps, i, j;
+    xc_grpinfo_t *info;
+
+    static char *kwd_list[] = { "first_grp", "max_grps", NULL };
+
+    /* pull values from python args */
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
+                                      &first_grp, &max_grps) )
+        return NULL;
+
+    /* alloc space for the group info and ask Xen (via libxc) for the info */
+    if ( (info = malloc(max_grps * sizeof(xc_grpinfo_t))) == NULL )
+        return PyErr_NoMemory();
+    nr_grps = xc_domain_group_getinfo(self->xc_handle, first_grp, max_grps,
+                                      info);
+
+    if (nr_grps < 0) {
+        free(info);
+        return PyErr_SetFromErrno(xc_error_obj);
+    }
+
+    /* iterate over the returned groups and 
+       put the returned values into python objects */
+    list = PyList_New(nr_grps);
+    for ( i = 0 ; i < nr_grps; i++ ) {
+        /* extract group ID and size */
+        info_dict = Py_BuildValue(
+                        "{s:i,s:i}",
+                        "dgid", info[i].dgid,
+                        "size", info[i].size);
+
+        EXTRACT_DOM_LIST(member_list, info_dict);
+
+        /* extract the group's handle */
+        pyhandle = PyList_New(sizeof(xen_domain_group_handle_t));
+        for ( j = 0; j < sizeof(xen_domain_group_handle_t); j++ )
+            PyList_SetItem(pyhandle, j, PyInt_FromLong(info[i].handle[j]));
+        PyDict_SetItemString(info_dict, "dg_handle", pyhandle);
+        Py_DECREF(pyhandle);
+
+        PyList_SetItem(list, i, info_dict);
+    }
+
+    free(info);
+
+    return list;
+}
+
+static PyObject *pyxc_domain_group_create(XcObject *self,
+                                          PyObject *args,
+                                          PyObject *kwds)
+{
+    uint32_t dgid = 0;
+    int i;
+    PyObject *pyhandle = NULL;
+    xen_domain_group_handle_t handle = {
+        0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+        0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
+
+    static char *kwd_list[] = { "handle", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|O", kwd_list,
+                                      &pyhandle))
+        return NULL;
+
+    if ( pyhandle != NULL )
+    {
+        if ( !PyList_Check(pyhandle) ||
+             (PyList_Size(pyhandle) != sizeof(xen_domain_group_handle_t)) )
+            goto out_exception;
+
+        for ( i = 0; i < sizeof(xen_domain_group_handle_t); i++ )
+        {
+            PyObject *p = PyList_GetItem(pyhandle, i);
+            if ( !PyInt_Check(p) )
+                goto out_exception;
+            handle[i] = (uint8_t)PyInt_AsLong(p);
+        }
+    } else
+        goto out_exception;
+
+    if ( (xc_domain_group_create(self->xc_handle, handle, &dgid)) < 0 )
+        return PyErr_SetFromErrno(xc_error_obj);
+
+    return PyInt_FromLong(dgid);
+
+out_exception:
+    errno = EINVAL;
+    PyErr_SetFromErrno(xc_error_obj);
+    return NULL;
+}
+
+static PyObject *pyxc_domain_group_pause(XcObject *self, PyObject *args)
+{
+    return xcop(self, args, xc_domain_group_pause);
+}
+
+static PyObject *pyxc_domain_group_unpause(XcObject *self, PyObject *args)
+{
+    return xcop(self, args, xc_domain_group_unpause);
+}
+
+static PyObject *pyxc_domain_group_destroy(XcObject *self, PyObject *args)
+{
+    return xcop(self, args, xc_domain_group_destroy);
+}
+
+static PyObject *pyxc_domain_group_join(XcObject *self, PyObject *args)
+{
+    uint32_t dgid, domid;
+    if (!PyArg_ParseTuple(args, "ii", &domid, &dgid))
+        return NULL;
+    if (xc_domain_group_join(self->xc_handle, domid, dgid) != 0)
+        return PyErr_SetFromErrno(xc_error_obj);
+    Py_INCREF(zero);
+    return zero;
+}
+
 static PyMethodDef pyxc_methods[] = {
     { "handle",
       (PyCFunction)pyxc_handle,
@@ -1027,6 +1175,13 @@ static PyMethodDef pyxc_methods[] = {
       " dom    [int, 0]:        Domain identifier to use (allocated if 
zero).\n"
       "Returns: [int] new domain identifier; -1 on error.\n" },
 
+    { "domain_group_create", 
+      (PyCFunction)pyxc_domain_group_create, 
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Create a new domain group.\n"
+      " grp    [int, 0]:        Domain group identifier to use (allocated if 
zero).\n"
+      "Returns: [int] new domain group identifier; -1 on error.\n" },
+
     { "domain_max_vcpus", 
       (PyCFunction)pyxc_domain_max_vcpus,
       METH_VARARGS, "\n"
@@ -1050,6 +1205,13 @@ static PyMethodDef pyxc_methods[] = {
       " dom [int]: Identifier of domain to be paused.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
+    { "domain_group_pause", 
+      (PyCFunction)pyxc_domain_group_pause, 
+      METH_VARARGS, "\n"
+      "Temporarily pause execution of all domains in a group.\n"
+      " grp [int]: Identifier of domain group to be paused.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
     { "domain_unpause", 
       (PyCFunction)pyxc_domain_unpause, 
       METH_VARARGS, "\n"
@@ -1057,11 +1219,25 @@ static PyMethodDef pyxc_methods[] = {
       " dom [int]: Identifier of domain to be unpaused.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
+    { "domain_group_unpause", 
+      (PyCFunction)pyxc_domain_group_unpause, 
+      METH_VARARGS, "\n"
+      "(Re)start execution of all domains in a group.\n"
+      " grp [int]: Identifier of domain group to be unpaused.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
     { "domain_destroy", 
       (PyCFunction)pyxc_domain_destroy, 
       METH_VARARGS, "\n"
       "Destroy a domain.\n"
       " dom [int]:    Identifier of domain to be destroyed.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "domain_group_destroy", 
+      (PyCFunction)pyxc_domain_group_destroy, 
+      METH_VARARGS, "\n"
+      "Destroy an empty domain group.\n"
+      " grp [int]:    Identifier of domain group to be destroyed.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
     { "domain_resume", 
@@ -1129,6 +1305,20 @@ static PyMethodDef pyxc_methods[] = {
       " shutdown_reason [int]: Numeric code from guest OS, explaining "
       "reason why it shut itself down.\n" },
 
+    { "domain_group_getinfo", 
+      (PyCFunction)pyxc_domain_group_getinfo, 
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Get information regarding a set of domain groups.\n"
+      " first_grp [int, 0]:    First domain to retrieve info about.\n"
+      " max_grps  [int, 1024]: Maximum number of domains to retrieve info"
+      " about.\n\n"
+      "Returns:  [list of dicts] if list length is less than 'max_grps'\n"
+      "          parameter then there was an error, or the end of the\n"
+      "          group-id space was reached.\n"
+      " grp               [int]: Id of group to which this info pertains\n"
+      " size              [int]: Number of domains in this group\n"
+      " member_list       [int array]: Unordered list of member Ids\n"},
+
     { "vcpu_getinfo", 
       (PyCFunction)pyxc_vcpu_getinfo, 
       METH_VARARGS | METH_KEYWORDS, "\n"
@@ -1337,6 +1527,14 @@ static PyMethodDef pyxc_methods[] = {
       METH_VARARGS, "\n"
       "Set a domain's time offset to Dom0's localtime\n"
       " dom        [int]: Domain whose time offset is being set.\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "domain_group_join",
+      (PyCFunction)pyxc_domain_group_join,
+      METH_VARARGS, "\n"
+      "Request that the given domain join the supplied group.\n"
+      " dom [int]: Identifier of domain joining group.\n"
+      " grp [int]: Identifier of group the given domain is joining.\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
 #ifdef __powerpc__
diff -r ecb6cd61a9cf tools/libxc/xc_domain_group.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_domain_group.c     Tue Feb 20 12:59:11 2007 -0500
@@ -0,0 +1,100 @@
+/******************************************************************************
+ * xc_domain_group.c
+ * 
+ * API for manipulating and obtaining information on domain groups.
+ * 
+ * Chris Bookholt (hap10@xxxxxxxxxxxxxx)
+ */
+
+#include "xc_private.h"
+#include <xen/memory.h>
+
+int xc_domain_group_create(int xc_handle,
+                           xen_domain_group_handle_t handle,
+                           uint32_t *pdgid)
+{
+       int err;
+       DECLARE_DOMGRPCTL;
+       domgrpctl.cmd = XEN_DOMGRPCTL_creategrp;
+       memcpy(domgrpctl.u.create_grp.handle, handle,
+              sizeof(xen_domain_group_handle_t));
+       
+       err = do_domgrpctl(xc_handle, &domgrpctl);
+       if (err) 
+               return err;
+
+       *pdgid = (uint16_t)domgrpctl.u.get_grp_info.dgid;
+       return 0;
+}
+
+int xc_domain_group_pause(int xc_handle, uint32_t dgid)
+{
+       DECLARE_DOMGRPCTL;
+       domgrpctl.cmd = XEN_DOMGRPCTL_pausegrp;
+       domgrpctl.u.pause_grp.dgid = (dgid_t) dgid;
+       return do_domgrpctl(xc_handle, &domgrpctl);
+}
+
+int xc_domain_group_unpause(int xc_handle, uint32_t dgid)
+{
+       DECLARE_DOMGRPCTL;
+       domgrpctl.cmd = XEN_DOMGRPCTL_unpausegrp;
+       domgrpctl.u.unpause_grp.dgid = (dgid_t) dgid;
+       return do_domgrpctl(xc_handle, &domgrpctl);
+}
+
+int xc_domain_group_destroy(int xc_handle, uint32_t dgid)
+{
+       DECLARE_DOMGRPCTL;
+       domgrpctl.cmd = XEN_DOMGRPCTL_destroygrp;
+       domgrpctl.u.destroy_grp.dgid = (dgid_t) dgid;
+       return do_domgrpctl(xc_handle, &domgrpctl);
+}
+
+int xc_domain_group_join(int xc_handle, uint32_t domid, uint32_t dgid)
+{
+       DECLARE_DOMGRPCTL;
+       domgrpctl.cmd = XEN_DOMGRPCTL_joingrp;
+       domgrpctl.u.join_grp.domid = (domid_t) domid;
+       domgrpctl.u.join_grp.dgid = (dgid_t) dgid;
+       return do_domgrpctl(xc_handle, &domgrpctl);
+}
+
+#define TRANSFER_LIST_TO_INFO(list_name)                               \
+       memcpy(info->list_name, domgrpctl.u.get_grp_info.list_name,     \
+               MAX_GROUP_SIZE*sizeof(domid_t));
+
+int xc_domain_group_getinfo(int xc_handle, uint32_t first_dgid,
+                            unsigned int max_grps, xc_grpinfo_t *info)
+{
+       unsigned int nr_grps;
+       uint32_t next_dgid = first_dgid;
+       DECLARE_DOMGRPCTL;
+       int rc = 0;
+
+       memset(info, 0, max_grps * sizeof(xc_grpinfo_t));
+
+       for (nr_grps = 0; nr_grps < max_grps; nr_grps++) {
+               domgrpctl.cmd = XEN_DOMGRPCTL_getgrpinfo;
+               domgrpctl.u.get_grp_info.dgid = (dgid_t) next_dgid;
+
+               rc = do_domgrpctl(xc_handle, &domgrpctl);
+               if (rc < 0)
+                       break;
+
+               info->dgid = (uint16_t) domgrpctl.u.get_grp_info.dgid;
+               info->size = (uint16_t) domgrpctl.u.get_grp_info.size;
+
+               TRANSFER_LIST_TO_INFO(member_list);
+               memcpy(info->handle, domgrpctl.u.get_grp_info.handle,
+                      sizeof(xen_domain_group_handle_t));
+
+               next_dgid = (uint16_t) domgrpctl.u.get_grp_info.dgid + 1;
+               info++;
+       }
+
+       if (!nr_grps)
+               return rc;
+
+       return nr_grps;
+}



