[Xen-devel] [Patch 4/6] Cpupools: python scripts

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [Patch 4/6] Cpupools: python scripts
From: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
Date: Tue, 20 Apr 2010 11:40:20 +0200
Delivery-date: Tue, 20 Apr 2010 02:45:00 -0700
Organization: Fujitsu Technology Solutions
-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@xxxxxxxxxxxxxx
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html
Signed-off-by: juergen.gross@xxxxxxxxxxxxxx
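
[Editorial note, not part of the patch: a minimal sketch of driving the new
libxc bindings below from a dom0 Python shell. Pool ids and cpu numbers are
made up; cpupool_create() defaults to the credit scheduler per the method
table further down.]

    import xen.lowlevel.xc

    xc = xen.lowlevel.xc.xc()
    pool_id = xc.cpupool_create()        # allocate a new pool id
    xc.cpupool_addcpu(pool_id)           # add the lowest free cpu (cpu=-1)
    print xc.cpupool_getinfo()           # one dict per pool
    print xc.cpupool_freeinfo()          # cpus not assigned to any pool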

diff -r fadf63ab49e7 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Apr 20 11:10:40 2010 +0200
@@ -97,17 +97,18 @@ static PyObject *pyxc_domain_create(XcOb
                                     PyObject *args,
                                     PyObject *kwds)
 {
-    uint32_t dom = 0, ssidref = 0, flags = 0, target = 0;
+    uint32_t dom = 0, ssidref = 0, flags = 0, target = 0, cpupool = 0;
     int      ret, i;
     PyObject *pyhandle = NULL;
     xen_domain_handle_t handle = { 
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
 
-    static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL };
+    static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", "cpupool", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list,
-                                      &dom, &ssidref, &pyhandle, &flags, &target))
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOiii", kwd_list, &dom,
+                                      &ssidref, &pyhandle, &flags, &target,
+                                      &cpupool))
         return NULL;
     if ( pyhandle != NULL )
     {
@@ -124,8 +125,9 @@ static PyObject *pyxc_domain_create(XcOb
         }
     }
 
+    flags |= XEN_DOMCTL_CDF_pool;
     if ( (ret = xc_domain_create(self->xc_handle, ssidref,
-                                 handle, flags, &dom)) < 0 )
+                                 handle, flags, &dom, cpupool)) < 0 )
         return pyxc_error_to_exception();
 
     if ( target )
@@ -329,7 +331,7 @@ static PyObject *pyxc_domain_getinfo(XcO
     {
         info_dict = Py_BuildValue(
             "{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
-            ",s:L,s:L,s:L,s:i,s:i}",
+            ",s:L,s:L,s:L,s:i,s:i,s:i}",
             "domid",           (int)info[i].domid,
             "online_vcpus",    info[i].nr_online_vcpus,
             "max_vcpu_id",     info[i].max_vcpu_id,
@@ -344,7 +346,8 @@ static PyObject *pyxc_domain_getinfo(XcO
             "cpu_time",        (long long)info[i].cpu_time,
             "maxmem_kb",       (long long)info[i].max_memkb,
             "ssidref",         (int)info[i].ssidref,
-            "shutdown_reason", info[i].shutdown_reason);
+            "shutdown_reason", info[i].shutdown_reason,
+            "cpupool",         (int)info[i].cpupool);
         pyhandle = PyList_New(sizeof(xen_domain_handle_t));
         if ( (pyhandle == NULL) || (info_dict == NULL) )
         {
@@ -1893,6 +1896,179 @@ static PyObject *pyxc_dom_set_memshr(XcO
     return zero;
 }
 
+static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+{
+    PyObject *cpulist = NULL;
+    uint32_t i;
+
+    cpulist = PyList_New(0);
+    for ( i = 0; cpumap != 0; i++ )
+    {
+        if ( cpumap & 1 )
+        {
+            PyObject* pyint = PyInt_FromLong(i);
+
+            PyList_Append(cpulist, pyint);
+            Py_DECREF(pyint);
+        }
+        cpumap >>= 1;
+    }
+    return cpulist;
+}
+
+static PyObject *pyxc_cpupool_create(XcObject *self,
+                                     PyObject *args,
+                                     PyObject *kwds)
+{
+    uint32_t cpupool = 0, sched = XEN_SCHEDULER_CREDIT;
+
+    static char *kwd_list[] = { "pool", "sched", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list, &cpupool,
+                                      &sched))
+        return NULL;
+
+    if ( xc_cpupool_create(self->xc_handle, &cpupool, sched) < 0 )
+        return pyxc_error_to_exception();
+
+    return PyInt_FromLong(cpupool);
+}
+
+static PyObject *pyxc_cpupool_destroy(XcObject *self,
+                                      PyObject *args)
+{
+    uint32_t cpupool;
+
+    if (!PyArg_ParseTuple(args, "i", &cpupool))
+        return NULL;
+
+    if (xc_cpupool_destroy(self->xc_handle, cpupool) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_getinfo(XcObject *self,
+                                      PyObject *args,
+                                      PyObject *kwds)
+{
+    PyObject *list, *info_dict;
+
+    uint32_t first_pool = 0;
+    int max_pools = 1024, nr_pools, i;
+    xc_cpupoolinfo_t *info;
+
+    static char *kwd_list[] = { "first_pool", "max_pools", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
+                                      &first_pool, &max_pools) )
+        return NULL;
+
+    info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
+    if (info == NULL)
+        return PyErr_NoMemory();
+
+    nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
+
+    if (nr_pools < 0)
+    {
+        free(info);
+        return pyxc_error_to_exception();
+    }
+
+    list = PyList_New(nr_pools);
+    for ( i = 0 ; i < nr_pools; i++ )
+    {
+        info_dict = Py_BuildValue(
+            "{s:i,s:i,s:i,s:N}",
+            "cpupool",         (int)info[i].cpupool_id,
+            "sched",           info[i].sched_id,
+            "n_dom",           info[i].n_dom,
+            "cpulist",         cpumap_to_cpulist(info[i].cpumap));
+        if ( info_dict == NULL )
+        {
+            Py_DECREF(list);
+            free(info);
+            return NULL;
+        }
+        PyList_SetItem(list, i, info_dict);
+    }
+
+    free(info);
+
+    return list;
+}
+
+static PyObject *pyxc_cpupool_addcpu(XcObject *self,
+                                     PyObject *args,
+                                     PyObject *kwds)
+{
+    uint32_t cpupool;
+    int cpu = -1;
+
+    static char *kwd_list[] = { "cpupool", "cpu", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
+                                      &cpupool, &cpu) )
+        return NULL;
+
+    if (xc_cpupool_addcpu(self->xc_handle, cpupool, cpu) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_removecpu(XcObject *self,
+                                        PyObject *args,
+                                        PyObject *kwds)
+{
+    uint32_t cpupool;
+    int cpu = -1;
+
+    static char *kwd_list[] = { "cpupool", "cpu", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
+                                      &cpupool, &cpu) )
+        return NULL;
+
+    if (xc_cpupool_removecpu(self->xc_handle, cpupool, cpu) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_movedomain(XcObject *self,
+                                         PyObject *args,
+                                         PyObject *kwds)
+{
+    uint32_t cpupool, domid;
+
+    static char *kwd_list[] = { "cpupool", "domid", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list,
+                                      &cpupool, &domid) )
+        return NULL;
+
+    if (xc_cpupool_movedomain(self->xc_handle, cpupool, domid) != 0)
+        return pyxc_error_to_exception();
+
+    Py_INCREF(zero);
+    return zero;
+}
+
+static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
+{
+    uint64_t cpumap;
+
+    if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+        return pyxc_error_to_exception();
+
+    return cpumap_to_cpulist(cpumap);
+}
 
 static PyMethodDef pyxc_methods[] = {
     { "handle",
@@ -2008,7 +2184,8 @@ static PyMethodDef pyxc_methods[] = {
       " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
       " cpu_time [long]: CPU time consumed, in nanoseconds\n"
       " shutdown_reason [int]: Numeric code from guest OS, explaining "
-      "reason why it shut itself down.\n" },
+      "reason why it shut itself down.\n"
+      " cpupool  [int]   Id of cpupool domain is bound to.\n" },
 
     { "vcpu_getinfo", 
       (PyCFunction)pyxc_vcpu_getinfo, 
@@ -2148,6 +2325,24 @@ static PyMethodDef pyxc_methods[] = {
       METH_VARARGS, "\n"
       "Get the scheduling parameters for a domain when running with the\n"
       "SMP credit scheduler.\n"
+      " domid     [int]:   domain id to get\n"
+      "Returns:   [dict]\n"
+      " weight    [short]: domain's scheduling weight\n"},
+
+    { "sched_credit2_domain_set",
+      (PyCFunction)pyxc_sched_credit2_domain_set,
+      METH_KEYWORDS, "\n"
+      "Set the scheduling parameters for a domain when running with the\n"
+      "SMP credit2 scheduler.\n"
+      " domid     [int]:   domain id to set\n"
+      " weight    [short]: domain's scheduling weight\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "sched_credit2_domain_get",
+      (PyCFunction)pyxc_sched_credit2_domain_get,
+      METH_VARARGS, "\n"
+      "Get the scheduling parameters for a domain when running with the\n"
+      "SMP credit2 scheduler.\n"
       " domid     [int]:   domain id to get\n"
       "Returns:   [dict]\n"
       " weight    [short]: domain's scheduling weight\n"},
@@ -2438,6 +2633,66 @@ static PyMethodDef pyxc_methods[] = {
       " enable  [int,0|1]:    Disable or enable?\n"
       "Returns: [int] 0 on success; -1 on error.\n" },
 
+    { "cpupool_create",
+      (PyCFunction)pyxc_cpupool_create,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Create new cpupool.\n"
+      " pool    [int, 0]: cpupool identifier to use (allocated if zero).\n"
+      " sched   [int]: scheduler to use (credit if unspecified).\n\n"
+      "Returns: [int] new cpupool identifier; -1 on error.\n" },
+
+    { "cpupool_destroy",
+      (PyCFunction)pyxc_cpupool_destroy,
+      METH_VARARGS, "\n"
+      "Destroy a cpupool.\n"
+      " pool [int]:    Identifier of cpupool to be destroyed.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_getinfo",
+      (PyCFunction)pyxc_cpupool_getinfo,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Get information regarding a set of cpupools, in increasing id order.\n"
+      " first_pool [int, 0]:    First cpupool to retrieve info about.\n"
+      " max_pools  [int, 1024]: Maximum number of cpupools to retrieve info"
+      " about.\n\n"
+      "Returns: [list of dicts] if list length is less than 'max_pools'\n"
+      "         parameter then there was an error, or the end of the\n"
+      "         cpupool-id space was reached.\n"
+      " pool     [int]: Identifier of cpupool to which this info pertains\n"
+      " sched    [int]:  Scheduler used for this cpupool\n"
+      " n_dom    [int]:  Number of Domains in this cpupool\n"
+      " cpulist  [list]: List of CPUs this cpupool is using\n" },
+
+    { "cpupool_addcpu",
+       (PyCFunction)pyxc_cpupool_addcpu,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Add a cpu to a cpupool.\n"
+      " pool    [int]: Identifier of cpupool.\n"
+      " cpu     [int, -1]: Cpu to add (lowest free if -1)\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_removecpu",
+       (PyCFunction)pyxc_cpupool_removecpu,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Remove a cpu from a cpupool.\n"
+      " pool    [int]: Identifier of cpupool.\n"
+      " cpu     [int, -1]: Cpu to remove (highest used if -1)\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_movedomain",
+       (PyCFunction)pyxc_cpupool_movedomain,
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Move a domain to another cpupool.\n"
+      " pool    [int]: Identifier of cpupool to move domain to.\n"
+      " dom     [int]: Domain to move\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "cpupool_freeinfo",
+      (PyCFunction)pyxc_cpupool_freeinfo,
+      METH_NOARGS, "\n"
+      "Get info about cpus not in any cpupool.\n"
+      "Returns: [list]: List of CPUs\n" },
+
     { NULL, NULL, 0, NULL }
 };
 
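[Editorial note: for reference, a pure-Python sketch of the semantics of the
cpumap_to_cpulist() helper added above (illustrative, not part of the patch):

    def cpumap_to_cpulist(cpumap):
        # mirror of the C helper: bitmask -> list of cpu numbers
        cpus = []
        i = 0
        while cpumap:
            if cpumap & 1:
                cpus.append(i)
            cpumap >>= 1
            i += 1
        return cpus

    assert cpumap_to_cpulist(11) == [0, 1, 3]    # 11 == binary 1011
]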
diff -r fadf63ab49e7 tools/python/xen/xend/XendAPI.py
--- a/tools/python/xen/xend/XendAPI.py  Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendAPI.py  Tue Apr 20 11:10:40 2010 +0200
@@ -51,6 +51,7 @@ from XendPSCSI import XendPSCSI, XendPSC
 from XendPSCSI import XendPSCSI, XendPSCSI_HBA
 from XendDSCSI import XendDSCSI, XendDSCSI_HBA
 from XendXSPolicy import XendXSPolicy, XendACMPolicy
+from xen.xend.XendCPUPool import XendCPUPool
 
 from XendAPIConstants import *
 from xen.util.xmlrpclib2 import stringify
@@ -498,6 +499,7 @@ classes = {
     'PSCSI_HBA'    : valid_object("PSCSI_HBA"),
     'DSCSI'        : valid_object("DSCSI"),
     'DSCSI_HBA'    : valid_object("DSCSI_HBA"),
+    'cpu_pool'     : valid_object("cpu_pool"),
 }
 
 autoplug_classes = {
@@ -514,6 +516,7 @@ autoplug_classes = {
     'DSCSI_HBA'   : XendDSCSI_HBA,
     'XSPolicy'    : XendXSPolicy,
     'ACMPolicy'   : XendACMPolicy,
+    'cpu_pool'    : XendCPUPool,
 }
 
 class XendAPI(object):
@@ -914,7 +917,8 @@ class XendAPI(object):
                     'API_version_minor',
                     'API_version_vendor',
                     'API_version_vendor_implementation',
-                    'enabled']
+                    'enabled',
+                    'resident_cpu_pools']
     
     host_attr_rw = ['name_label',
                     'name_description',
@@ -1014,6 +1018,8 @@ class XendAPI(object):
         return xen_api_todo()
     def host_get_logging(self, _, host_ref):
         return xen_api_todo()
+    def host_get_resident_cpu_pools(self, _, host_ref):
+        return xen_api_success(XendCPUPool.get_all())
 
     # object methods
     def host_disable(self, session, host_ref):
@@ -1076,7 +1082,9 @@ class XendAPI(object):
                   'PBDs': XendPBD.get_all(),
                   'PPCIs': XendPPCI.get_all(),
                   'PSCSIs': XendPSCSI.get_all(),
-                  'PSCSI_HBAs': XendPSCSI_HBA.get_all()}
+                  'PSCSI_HBAs': XendPSCSI_HBA.get_all(),
+                  'resident_cpu_pools': XendCPUPool.get_all(),
+                 }
         return xen_api_success(record)
 
     def host_tmem_thaw(self, _, host_ref, cli_id):
@@ -1185,7 +1193,10 @@ class XendAPI(object):
                         'stepping',
                         'flags',
                         'utilisation',
-                        'features']
+                        'features',
+                        'cpu_pool']
+
+    host_cpu_funcs  = [('get_unassigned_cpus', 'Set(host_cpu)')]
 
     # attributes
     def _host_cpu_get(self, ref, field):
@@ -1210,21 +1221,28 @@ class XendAPI(object):
         return self._host_cpu_get(ref, 'flags')
     def host_cpu_get_utilisation(self, _, ref):
         return xen_api_success(XendNode.instance().get_host_cpu_load(ref))
+    def host_cpu_get_cpu_pool(self, _, ref):
+        return xen_api_success(XendCPUPool.get_cpu_pool_by_cpu_ref(ref))
 
     # object methods
     def host_cpu_get_record(self, _, ref):
         node = XendNode.instance()
         record = dict([(f, node.get_host_cpu_field(ref, f))
                        for f in self.host_cpu_attr_ro
-                       if f not in ['uuid', 'host', 'utilisation']])
+                       if f not in ['uuid', 'host', 'utilisation', 'cpu_pool']])
         record['uuid'] = ref
         record['host'] = node.uuid
         record['utilisation'] = node.get_host_cpu_load(ref)
+        record['cpu_pool'] = XendCPUPool.get_cpu_pool_by_cpu_ref(ref)
         return xen_api_success(record)
 
     # class methods
     def host_cpu_get_all(self, session):
         return xen_api_success(XendNode.instance().get_host_cpu_refs())
+    def host_cpu_get_unassigned_cpus(self, session):
+        return xen_api_success(
+            [ref for ref in XendNode.instance().get_host_cpu_refs()
+                 if len(XendCPUPool.get_cpu_pool_by_cpu_ref(ref)) == 0])
 
 
     # Xen API: Class host_metrics
@@ -1284,6 +1302,7 @@ class XendAPI(object):
                   'is_control_domain',
                   'metrics',
                   'crash_dumps',
+                  'cpu_pool',
                   ]
                   
     VM_attr_rw = ['name_label',
@@ -1312,7 +1331,9 @@ class XendAPI(object):
                   'platform',
                   'PCI_bus',
                   'other_config',
-                  'security_label']
+                  'security_label',
+                  'pool_name',
+                  ]
 
     VM_methods = [('clone', 'VM'),
                   ('start', None),
@@ -1340,7 +1361,9 @@ class XendAPI(object):
                   ('set_memory_dynamic_min_live', None),
                   ('send_trigger', None),
                   ('migrate', None),
-                  ('destroy', None)]
+                  ('destroy', None),
+                  ('cpu_pool_migrate', None),
+                  ]
     
     VM_funcs  = [('create', 'VM'),
                  ('restore', None),
@@ -1540,6 +1563,17 @@ class XendAPI(object):
         return xen_api_success(
             xd.get_vm_by_uuid(vm_ref) == xd.privilegedDomain())
 
+    def VM_get_cpu_pool(self, session, vm_ref):
+        dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
+        pool_ref = XendCPUPool.query_pool_ref(dom.get_cpu_pool())
+        return xen_api_success(pool_ref)
+
+    def VM_get_pool_name(self, session, vm_ref):
+        return self.VM_get('pool_name', session, vm_ref)
+
+    def VM_set_pool_name(self, session, vm_ref, value):
+        return self.VM_set('pool_name', session, vm_ref, value)
+
     def VM_set_name_label(self, session, vm_ref, label):
         dom = XendDomain.instance().get_vm_by_uuid(vm_ref)
         dom.setName(label)
@@ -1618,7 +1652,8 @@ class XendAPI(object):
             if key.startswith("cpumap"):
                 vcpu = int(key[6:])
                 try:
-                    xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value)
+                    cpus = map(int, value.split(","))
+                    xendom.domain_pincpu(xeninfo.getDomid(), vcpu, cpus)
                 except Exception, ex:
                     log.exception(ex)
 
@@ -1834,7 +1869,9 @@ class XendAPI(object):
             'is_control_domain': xeninfo.info['is_control_domain'],
             'metrics': xeninfo.get_metrics(),
             'security_label': xeninfo.get_security_label(),
-            'crash_dumps': []
+            'crash_dumps': [],
+            'pool_name': xeninfo.info.get('pool_name'),
+            'cpu_pool' : XendCPUPool.query_pool_ref(xeninfo.get_cpu_pool()),
         }
         return xen_api_success(record)
 
@@ -1930,6 +1967,25 @@ class XendAPI(object):
     def VM_restore(self, _, src, paused):
         xendom = XendDomain.instance()
         xendom.domain_restore(src, bool(paused))
+        return xen_api_success_void()
+
+    def VM_cpu_pool_migrate(self, session, vm_ref, cpu_pool_ref):
+        xendom = XendDomain.instance()
+        xeninfo = xendom.get_vm_by_uuid(vm_ref)
+        domid = xeninfo.getDomid()
+        pool = XendAPIStore.get(cpu_pool_ref, XendCPUPool.getClass())
+        if pool == None:
+            return xen_api_error(['HANDLE_INVALID', 'cpu_pool', cpu_pool_ref])
+        if domid is not None:
+            if domid == 0:
+                return xen_api_error(['OPERATION_NOT_ALLOWED',
+                    'could not move Domain-0'])
+            try:
+                XendCPUPool.move_domain(cpu_pool_ref, domid)
+            except Exception, ex:
+                return xen_api_error(['INTERNAL_ERROR',
+                    'could not move domain'])
+        self.VM_set('pool_name', session, vm_ref, pool.get_name_label())
         return xen_api_success_void()
 
 
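[Editorial note: client side, the new call surfaces as VM.cpu_pool_migrate. A
hedged sketch over the Xen-API interface; the session URL, credentials and
names are illustrative, and cpu_pool.get_by_name_label comes with the cpupool
class introduced elsewhere in this series:

    import XenAPI

    session = XenAPI.Session('http://localhost:9363')
    session.login_with_password('', '')
    pool_ref = session.xenapi.cpu_pool.get_by_name_label('Pool-1')[0]
    vm_ref = session.xenapi.VM.get_by_name_label('domU-test')[0]
    session.xenapi.VM.cpu_pool_migrate(vm_ref, pool_ref)
    print session.xenapi.VM.get_pool_name(vm_ref)
]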
diff -r fadf63ab49e7 tools/python/xen/xend/XendConfig.py
--- a/tools/python/xen/xend/XendConfig.py       Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendConfig.py       Tue Apr 20 11:10:40 2010 +0200
@@ -128,6 +128,7 @@ XENAPI_CFG_TO_LEGACY_CFG = {
     'PV_bootloader': 'bootloader',
     'PV_bootloader_args': 'bootloader_args',
     'Description': 'description',
+    'pool_name' : 'pool_name',
 }
 
 LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG)
@@ -233,6 +234,7 @@ XENAPI_CFG_TYPES = {
     's3_integrity' : int,
     'superpages' : int,
     'memory_sharing': int,
+    'pool_name' : str,
     'Description': str,
 }
 
@@ -279,6 +281,7 @@ LEGACY_CFG_TYPES = {
     'bootloader':    str,
     'bootloader_args': str,
     'description':   str,
+    'pool_name':     str,
 }
 
 # Values that should be stored in xenstore's /vm/<uuid> that is used
@@ -300,6 +303,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
     'on_xend_stop',
     'bootloader',
     'bootloader_args',
+    'pool_name',
 ]
 
 ##
@@ -408,6 +412,7 @@ class XendConfig(dict):
             'other_config': {},
             'platform': {},
             'target': 0,
+            'pool_name' : 'Pool-0',
             'superpages': 0,
             'description': '',
         }
diff -r fadf63ab49e7 tools/python/xen/xend/XendConstants.py
--- a/tools/python/xen/xend/XendConstants.py    Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendConstants.py    Tue Apr 20 11:10:40 2010 +0200
@@ -133,6 +133,8 @@ VTPM_DELETE_SCRIPT = auxbin.scripts_dir(
 
 XS_VMROOT = "/vm/"
 
+XS_POOLROOT = "/local/pool/"
+
 NR_PCI_FUNC = 8
 NR_PCI_DEV = 32
 NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV
diff -r fadf63ab49e7 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py   Tue Apr 20 11:10:40 2010 +0200
@@ -60,6 +60,7 @@ from xen.xend.xenstore.xswatch import xs
 from xen.xend.xenstore.xswatch import xswatch
 from xen.xend.XendConstants import *
 from xen.xend.XendAPIConstants import *
+from xen.xend.XendCPUPool import XendCPUPool
 from xen.xend.server.DevConstants import xenbusState
 from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString
 
@@ -2540,6 +2541,19 @@ class XendDomainInfo:
         oos = self.info['platform'].get('oos', 1)
         oos_off = 1 - int(oos)
 
+        # look-up pool id to use
+        pool_name = self.info['pool_name']
+        if len(pool_name) == 0:
+            pool_name = "Pool-0"
+
+        pool = XendCPUPool.lookup_pool(pool_name)
+
+        if pool is None:
+            raise VmError("unknown pool %s" % pool_name)
+        pool_id = pool.query_pool_id()
+        if pool_id is None:
+            raise VmError("pool %s not activated" % pool_name)
+
         flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
 
         try:
@@ -2548,6 +2562,7 @@ class XendDomainInfo:
                 ssidref = ssidref,
                 handle = uuid.fromString(self.info['uuid']),
                 flags = flags,
+                cpupool = pool_id,
                 target = self.info.target())
         except Exception, e:
             # may get here if due to ACM the operation is not permitted
@@ -3585,6 +3600,11 @@ class XendDomainInfo:
 
         retval = xc.sched_credit_domain_get(self.getDomid())
         return retval
+    def get_cpu_pool(self):
+        if self.getDomid() is None:
+            return None
+        xeninfo = dom_get(self.domid)
+        return xeninfo['cpupool']
     def get_power_state(self):
         return XEN_API_VM_POWER_STATE[self._stateGet()]
     def get_platform(self):
diff -r fadf63ab49e7 tools/python/xen/xend/XendError.py
--- a/tools/python/xen/xend/XendError.py        Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendError.py        Tue Apr 20 11:10:40 2010 +0200
@@ -18,6 +18,7 @@
 
 from xmlrpclib import Fault
 
+import types
 import XendClient
 
 class XendInvalidDomain(Fault):
@@ -186,6 +187,26 @@ class DirectPCIError(XendAPIError):
     def __str__(self):
         return 'DIRECT_PCI_ERROR: %s' % self.error
 
+class PoolError(XendAPIError):
+    def __init__(self, error, spec=None):
+        XendAPIError.__init__(self)
+        self.spec = []
+        if spec:
+            if isinstance(spec, types.ListType):
+                self.spec = spec
+            else:
+                self.spec = [spec]
+        self.error = error
+
+    def get_api_error(self):
+        return [self.error] + self.spec
+
+    def __str__(self):
+        if self.spec:
+            return '%s: %s' % (self.error, self.spec)
+        else:
+            return '%s' % self.error
+
 class VDIError(XendAPIError):
     def __init__(self, error, vdi):
         XendAPIError.__init__(self)
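[Editorial note: usage sketch for the new PoolError class above; the error
code string is an example, not one defined by this patch:

    from xen.xend.XendError import PoolError

    try:
        raise PoolError('POOL_BAD_STATE', 'Pool-1')
    except PoolError, ex:
        print ex.get_api_error()    # ['POOL_BAD_STATE', 'Pool-1']
        print str(ex)               # POOL_BAD_STATE: ['Pool-1']
]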
diff -r fadf63ab49e7 tools/python/xen/xend/XendNode.py
--- a/tools/python/xen/xend/XendNode.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/XendNode.py Tue Apr 20 11:10:40 2010 +0200
@@ -43,6 +43,7 @@ from XendMonitor import XendMonitor
 from XendMonitor import XendMonitor
 from XendPPCI import XendPPCI
 from XendPSCSI import XendPSCSI, XendPSCSI_HBA
+from xen.xend.XendCPUPool import XendCPUPool
 
 class XendNode:
     """XendNode - Represents a Domain 0 Host."""
@@ -158,6 +159,8 @@ class XendNode:
         self._init_PPCIs()
 
         self._init_PSCSIs()
+
+        self._init_cpu_pools()
 
 
     def _init_networks(self):
@@ -361,6 +364,18 @@ class XendNode:
         for physical_host, pscsi_HBA_uuid in pscsi_HBA_table.items():
             XendPSCSI_HBA(pscsi_HBA_uuid, {'physical_host': physical_host})
 
+    def _init_cpu_pools(self):
+        # Initialise cpu_pools
+        saved_cpu_pools = self.state_store.load_state(XendCPUPool.getClass())
+        if saved_cpu_pools:
+            for cpu_pool_uuid, cpu_pool in saved_cpu_pools.items():
+                try:
+                    XendCPUPool.recreate(cpu_pool, cpu_pool_uuid)
+                except CreateUnspecifiedAttributeError:
+                    log.warn("Error recreating %s %s",
+                             XendCPUPool.getClass(), cpu_pool_uuid)
+        XendCPUPool.recreate_active_pools()
+
 
     def add_network(self, interface):
         # TODO
@@ -581,6 +596,7 @@ class XendNode:
         self.save_PPCIs()
         self.save_PSCSIs()
         self.save_PSCSI_HBAs()
+        self.save_cpu_pools()
 
     def save_PIFs(self):
         pif_records = dict([(pif_uuid, XendAPIStore.get(
@@ -622,6 +638,12 @@ class XendNode:
                                      pscsi_HBA_uuid, "PSCSI_HBA").get_record())
                                 for pscsi_HBA_uuid in XendPSCSI_HBA.get_all()])
         self.state_store.save_state('pscsi_HBA', pscsi_HBA_records)
+
+    def save_cpu_pools(self):
+        cpu_pool_records = dict([(cpu_pool_uuid, XendAPIStore.get(
+                    cpu_pool_uuid, XendCPUPool.getClass()).get_record())
+                    for cpu_pool_uuid in XendCPUPool.get_all_managed()])
+        self.state_store.save_state(XendCPUPool.getClass(), cpu_pool_records)
 
     def shutdown(self):
         return 0
@@ -925,6 +947,7 @@ class XendNode:
         # physinfo is in KiB, need it in MiB
         info['total_memory'] = info['total_memory'] / 1024
         info['free_memory']  = info['free_memory'] / 1024
+        info['free_cpus'] = len(XendCPUPool.unbound_cpus())
 
         ITEM_ORDER = ['nr_cpus',
                       'nr_nodes',
@@ -935,6 +958,7 @@ class XendNode:
                       'virt_caps',
                       'total_memory',
                       'free_memory',
+                      'free_cpus',
                       ]
 
         if show_numa != 0:
diff -r fadf63ab49e7 tools/python/xen/xend/server/SrvServer.py
--- a/tools/python/xen/xend/server/SrvServer.py Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/server/SrvServer.py Tue Apr 20 11:10:40 2010 +0200
@@ -52,6 +52,7 @@ from xen.xend.XendLogging import log
 from xen.xend.XendLogging import log
 from xen.xend.XendClient import XEN_API_SOCKET
 from xen.xend.XendDomain import instance as xenddomain
+from xen.xend.XendCPUPool import XendCPUPool
 from xen.web.SrvDir import SrvDir
 
 from SrvRoot import SrvRoot
@@ -146,6 +147,12 @@ class XendServers:
                 status.close()
                 status = None
 
+            # auto start pools before domains are started
+            try:
+                XendCPUPool.autostart_pools()
+            except Exception, e:
+                log.exception("Failed while autostarting pools")
+
             # Reaching this point means we can auto start domains
             try:
                 xenddomain().autostart_domains()
diff -r fadf63ab49e7 tools/python/xen/xend/server/XMLRPCServer.py
--- a/tools/python/xen/xend/server/XMLRPCServer.py      Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xend/server/XMLRPCServer.py      Tue Apr 20 11:10:40 2010 +0200
@@ -33,6 +33,7 @@ from xen.xend.XendConstants import DOM_S
 from xen.xend.XendConstants import DOM_STATE_RUNNING
 from xen.xend.XendLogging import log
 from xen.xend.XendError import XendInvalidDomain
+from xen.xend.XendCPUPool import XendCPUPool
 
 # vcpu_avail is a long and is not needed by the clients.  It's far easier
 # to just remove it then to try and marshal the long.
@@ -97,6 +98,10 @@ methods = ['device_create', 'device_conf
            'getRestartCount', 'getBlockDeviceClass']
 
 exclude = ['domain_create', 'domain_restore']
+
+POOL_FUNCS = ['pool_create', 'pool_new', 'pool_start', 'pool_list',
+              'pool_destroy', 'pool_delete', 'pool_cpu_add', 'pool_cpu_remove',
+              'pool_migrate']
 
 class XMLRPCServer:
     def __init__(self, auth, use_xenapi, use_tcp = False,
@@ -197,6 +202,11 @@ class XMLRPCServer:
                 if name not in exclude:
                    self.server.register_function(fn, "xend.domain.%s" % name[7:])
 
+        # Functions in XendPool
+        for name in POOL_FUNCS:
+            fn = getattr(XendCPUPool, name)
+            self.server.register_function(fn, "xend.cpu_pool.%s" % name[5:])
+
         # Functions in XendNode and XendDmesg
         for type, lst, n in [(XendNode,
                               ['info', 'pciinfo', 'send_debug_keys',
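[Editorial note: the registration loop above exposes the pool functions as
xend.cpu_pool.* over xend's legacy XML-RPC interface. A hedged sketch of a
direct client call; the unix-socket path is xend's usual default, not
something this patch introduces:

    from xen.util.xmlrpcclient import ServerProxy

    server = ServerProxy('httpu:///var/run/xend/xmlrpc.sock')
    print server.xend.cpu_pool.list([])       # backs 'xm pool-list'
]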
diff -r fadf63ab49e7 tools/python/xen/xm/create.dtd
--- a/tools/python/xen/xm/create.dtd    Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xm/create.dtd    Tue Apr 20 11:10:40 2010 +0200
@@ -50,6 +50,7 @@
                  s3_integrity           CDATA #REQUIRED
                  vcpus_max              CDATA #REQUIRED
                  vcpus_at_startup       CDATA #REQUIRED
+                 pool_name              CDATA #REQUIRED
                  actions_after_shutdown %NORMAL_EXIT; #REQUIRED 
                  actions_after_reboot   %NORMAL_EXIT; #REQUIRED
                  actions_after_crash    %CRASH_BEHAVIOUR; #REQUIRED
diff -r fadf63ab49e7 tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py     Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xm/create.py     Tue Apr 20 11:10:40 2010 +0200
@@ -659,6 +659,10 @@ gopts.var('suppress_spurious_page_faults
           fn=set_bool, default=None,
           use="""Do not inject spurious page faults into this guest""")
 
+gopts.var('pool', val='POOL NAME',
+          fn=set_value, default=None,
+          use="""CPU pool to use for the VM""")
+
 gopts.var('pci_msitranslate', val='TRANSLATE',
           fn=set_int, default=1,
           use="""Global PCI MSI-INTx translation flag (0=disable;
@@ -1147,6 +1151,8 @@ def make_config(vals):
         config.append(['localtime', vals.localtime])
     if vals.oos:
         config.append(['oos', vals.oos])
+    if vals.pool:
+        config.append(['pool_name', vals.pool])
 
     config_image = configure_image(vals)
     if vals.bootloader:
diff -r fadf63ab49e7 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xm/main.py       Tue Apr 20 11:10:40 2010 +0200
@@ -56,6 +56,7 @@ import xen.util.xsm.xsm as security
 import xen.util.xsm.xsm as security
 from xen.util.xsm.xsm import XSMError
 from xen.util.acmpolicy import ACM_LABEL_UNLABELED_DISPLAY
+from xen.util.sxputils import sxp2map, map2sxp as map_to_sxp
 from xen.util import auxbin
 
 import XenAPI
@@ -238,6 +239,23 @@ SUBCOMMAND_HELP = {
     'tmem-freeable'  :  ('', 'Print freeable tmem (in MiB).'),
     'tmem-shared-auth' :  ('[<Domain>|-a|--all] [--uuid=<uuid>] [--auth=<0|1>]', 'De/authenticate shared tmem pool.'),
 
+    #
+    # pool commands
+    #
+    'pool-create'   :  ('<ConfigFile> [vars]',
+                        'Create a CPU pool based on a ConfigFile.'),
+    'pool-new'      :  ('<ConfigFile> [vars]',
+                        'Adds a CPU pool to Xend CPU pool management'),
+    'pool-start'    :  ('<CPU Pool>', 'Starts a Xend CPU pool'),
+    'pool-list'     :  ('[<CPU Pool>] [-l|--long] [-c|--cpus]', 'List CPU pools on host'),
+    'pool-destroy'  :  ('<CPU Pool>', 'Deactivates a CPU pool'),
+    'pool-delete'   :  ('<CPU Pool>',
+                        'Removes a CPU pool from Xend management'),
+    'pool-cpu-add'  :  ('<CPU Pool> <CPU nr>', 'Adds a CPU to a CPU pool'),
+    'pool-cpu-remove': ('<CPU Pool> <CPU nr>', 'Removes a CPU from a CPU pool'),
+    'pool-migrate'  :  ('<Domain> <CPU Pool>',
+                        'Moves a domain into a CPU pool'),
+
     # security
 
     'addlabel'      :  ('<label> {dom <ConfigFile>|res <resource>|mgt <managed domain>} [<policy>]',
@@ -284,10 +302,15 @@ SUBCOMMAND_OPTIONS = {
        ('-d DOMAIN', '--domain=DOMAIN', 'Domain to modify'),
        ('-w WEIGHT', '--weight=WEIGHT', 'Weight (int)'),
     ),
+    'sched-credit2': (
+       ('-d DOMAIN', '--domain=DOMAIN', 'Domain to modify'),
+       ('-w WEIGHT', '--weight=WEIGHT', 'Weight (int)'),
+    ),
     'list': (
        ('-l', '--long',         'Output all VM details in SXP'),
        ('', '--label',          'Include security labels'),
        ('', '--state=<state>',  'Select only VMs with the specified state'),
+       ('', '--pool=<pool>',    'Select only VMs in specified cpu pool'),
     ),
     'console': (
       ('-q', '--quiet', 'Do not print an error message if the domain does not exist'),
@@ -348,6 +371,10 @@ SUBCOMMAND_OPTIONS = {
        ('-a', '--all', 'Authenticate for all tmem pools.'),
       ('-u', '--uuid', 'Specify uuid (abcdef01-2345-6789-01234567890abcdef).'),
        ('-A', '--auth', '0=auth,1=deauth'),
+    ),
+    'pool-list': (
+       ('-l', '--long', 'Output all CPU pool details in SXP format'),
+       ('-c', '--cpus', 'Output list of CPUs used by a pool'),
     ),
 }
 
@@ -494,9 +521,21 @@ tmem_commands = [
     "tmem-shared-auth",
     ]
 
+pool_commands = [
+    "pool-create",
+    "pool-new",
+    "pool-start",
+    "pool-list",
+    "pool-destroy",
+    "pool-delete",
+    "pool-cpu-add",
+    "pool-cpu-remove",
+    "pool-migrate",
+    ]
+
 all_commands = (domain_commands + host_commands + scheduler_commands +
                 device_commands + vnet_commands + security_commands +
-                acm_commands + flask_commands + tmem_commands + 
+                acm_commands + flask_commands + tmem_commands + pool_commands +
                 ['shell', 'event-monitor'])
 
 
@@ -890,7 +929,7 @@ def datetime_to_secs(v):
         v = str(v).replace(c, "")
     return time.mktime(time.strptime(v[0:14], '%Y%m%dT%H%M%S'))
 
-def getDomains(domain_names, state, full = 0):
+def getDomains(domain_names, state, full = 0, pool = None):
     if serverType == SERVER_XEN_API:
         doms_sxp = []
         doms_dict = []
@@ -899,6 +938,9 @@ def getDomains(domain_names, state, full
         dom_metrics_recs = server.xenapi.VM_metrics.get_all_records()
 
         for dom_ref, dom_rec in dom_recs.items():
+            if pool and pool != dom_rec['pool_name']:
+                continue
+
             dom_metrics_rec = dom_metrics_recs[dom_rec['metrics']]
 
             states = ('running', 'blocked', 'paused', 'shutdown',
@@ -939,7 +981,15 @@ def getDomains(domain_names, state, full
         if domain_names:
             return [server.xend.domain(dom, full) for dom in domain_names]
         else:
-            return server.xend.domains_with_state(True, state, full)
+            doms = server.xend.domains_with_state(True, state, full)
+            if not pool:
+                return doms
+            else:
+                doms_in_pool = []
+                for dom in doms:
+                    if sxp.child_value(dom, 'pool_name', '') == pool:
+                        doms_in_pool.append(dom)
+                return doms_in_pool
 
 
 def xm_list(args):
@@ -947,10 +997,11 @@ def xm_list(args):
     show_vcpus = 0
     show_labels = 0
     state = 'all'
+    pool = None
     try:
         (options, params) = getopt.gnu_getopt(args, 'lv',
                                               ['long','vcpus','label',
-                                               'state='])
+                                               'state=','pool='])
     except getopt.GetoptError, opterr:
         err(opterr)
         usage('list')
@@ -964,10 +1015,16 @@ def xm_list(args):
             show_labels = 1
         if k in ['--state']:
             state = v
+        if k in ['--pool']:
+            pool = v
 
     if state != 'all' and len(params) > 0:
         raise OptionError(
             "You may specify either a state or a particular VM, but not both")
+
+    if pool and len(params) > 0:
+        raise OptionError(
+            "You may specify either a pool or a particular VM, but not both")
 
     if show_vcpus:
         print >>sys.stderr, (
@@ -975,7 +1032,7 @@ def xm_list(args):
         xm_vcpu_list(params)
         return
 
-    doms = getDomains(params, state, use_long)
+    doms = getDomains(params, state, use_long, pool)
 
     if use_long:
         map(PrettyPrint.prettyprint, doms)
@@ -1823,6 +1880,80 @@ def xm_sched_credit2(args):
             if result != 0:
                 err(str(result))
 
+def xm_sched_credit2(args):
+    """Get/Set options for Credit2 Scheduler."""
+
+    check_sched_type('credit2')
+
+    try:
+        opts, params = getopt.getopt(args, "d:w:",
+            ["domain=", "weight="])
+    except getopt.GetoptError, opterr:
+        err(opterr)
+        usage('sched-credit2')
+
+    domid = None
+    weight = None
+
+    for o, a in opts:
+        if o in ["-d", "--domain"]:
+            domid = a
+        elif o in ["-w", "--weight"]:
+            weight = int(a)
+
+    doms = filter(lambda x : domid_match(domid, x),
+                  [parse_doms_info(dom)
+                  for dom in getDomains(None, 'all')])
+
+    if weight is None:
+        if domid is not None and doms == []:
+            err("Domain '%s' does not exist." % domid)
+            usage('sched-credit2')
+        # print header if we aren't setting any parameters
+        print '%-33s %4s %6s' % ('Name','ID','Weight')
+
+        for d in doms:
+            try:
+                if serverType == SERVER_XEN_API:
+                    info = server.xenapi.VM_metrics.get_VCPUs_params(
+                        server.xenapi.VM.get_metrics(
+                            get_single_vm(d['name'])))
+                else:
+                    info = server.xend.domain.sched_credit2_get(d['name'])
+            except xmlrpclib.Fault:
+                info = {}
+
+            if 'weight' not in info:
+                # domain does not support sched-credit2?
+                info = {'weight': -1}
+
+            info['weight'] = int(info['weight'])
+
+            info['name']  = d['name']
+            info['domid'] = str(d['domid'])
+            print( ("%(name)-32s %(domid)5s %(weight)6d") % info)
+    else:
+        if domid is None:
+            # place holder for system-wide scheduler parameters
+            err("No domain given.")
+            usage('sched-credit2')
+
+        if serverType == SERVER_XEN_API:
+            if doms[0]['domid']:
+                server.xenapi.VM.add_to_VCPUs_params_live(
+                    get_single_vm(domid),
+                    "weight",
+                    weight)
+            else:
+                server.xenapi.VM.add_to_VCPUs_params(
+                    get_single_vm(domid),
+                    "weight",
+                    weight)
+        else:
+            result = server.xend.domain.sched_credit2_set(domid, weight)
+            if result != 0:
+                err(str(result))
+
 def xm_info(args):
     arg_check(args, "info", 0, 1)
     
@@ -1890,6 +2021,13 @@ def xm_info(args):
             else:
                 return ""
                 
+        def getFreeCpuCount():
+            cnt = 0
+            for host_cpu_record in host_cpu_records:
+                if len(host_cpu_record.get("cpu_pool", [])) == 0:
+                    cnt += 1
+            return cnt
+
         info = {
             "host":              getVal(["name_label"]),
             "release":           getVal(["software_version", "release"]),
@@ -1901,6 +2039,7 @@ def xm_info(args):
             "threads_per_core":  getVal(["cpu_configuration", 
"threads_per_core"]),
             "cpu_mhz":           getCpuMhz(),
             "hw_caps":           getCpuFeatures(),
+            "free_cpus":         getFreeCpuCount(),
             "total_memory":      
int(host_metrics_record["memory_total"])/1024/1024,
             "free_memory":       
int(host_metrics_record["memory_free"])/1024/1024,
             "xen_major":         getVal(["software_version", "xen_major"]),
@@ -3528,6 +3667,169 @@ def xm_tmem_shared_auth(args):
         return server.xenapi.host.tmem_shared_auth(domid,uuid_str,auth)
     else:
         return server.xend.node.tmem_shared_auth(domid,uuid_str,auth)
+
+def get_pool_ref(name):
+    refs = server.xenapi.cpu_pool.get_by_name_label(name)
+    if len(refs) > 0:
+        return refs[0]
+    else:
+        err('unknown pool name')
+        sys.exit(1)
+
+def xm_pool_start(args):
+    arg_check(args, "pool-start", 1)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        server.xenapi.cpu_pool.activate(ref)
+    else:
+        server.xend.cpu_pool.start(args[0])
+
+def brief_pool_list(sxprs):
+    format_str = "%-16s   %3s  %8s       %s          %s"
+    for sxpr in sxprs:
+        if sxpr == sxprs[0]:
+            print "Name               CPUs   Sched     Active   Domain count"
+        record = sxp2map(sxpr)
+        name = record['name_label']
+        sched_policy = record['sched_policy']
+        if record['activated']:
+            cpus = record.get('host_CPU_numbers', [])
+            vms = record.get('started_VM_names', [])
+            if not isinstance(cpus, types.ListType):
+                cpus = [cpus]
+            if not isinstance(vms, types.ListType):
+                vms = [vms]
+            cpu_count = len(cpus)
+            vm_count  = len(vms)
+            active = 'y'
+        else:
+            cpu_count = record['ncpu']
+            vm_count  = 0
+            active = 'n'
+        print format_str % (name, cpu_count, sched_policy, active, vm_count)
+
+def brief_pool_list_cpus(sxprs):
+    format_str = "%-16s %s"
+    for sxpr in sxprs:
+        if sxpr == sxprs[0]:
+            print format_str % ("Name", "CPU list")
+        record = sxp2map(sxpr)
+        name = record['name_label']
+        cpus = ""
+        if record['activated']:
+            cpus = record.get('host_CPU_numbers', [])
+            if isinstance(cpus, types.ListType):
+                cpus.sort()
+                cpus = reduce(lambda x,y: x + "%s," % y, cpus, "")
+                cpus = cpus[0:len(cpus)-1]
+            else:
+                cpus = str(cpus)
+        if len(cpus) == 0:
+            cpus = "-"
+        print format_str % (name, cpus)
+
+def xm_pool_list(args):
+    arg_check(args, "pool-list", 0, 2)
+    try:
+        (options, params) = getopt.gnu_getopt(args, 'lc', ['long','cpus'])
+    except getopt.GetoptError, opterr:
+        err(opterr)
+        usage('pool-list')
+    if len(params) > 1:
+        err("Only one pool name for selection allowed")
+        usage('pool-list')
+
+    use_long = False
+    show_cpus = False
+    for (k, _) in options:
+        if k in ['-l', '--long']:
+            use_long = True
+        if k in ['-c', '--cpus']:
+            show_cpus = True
+
+    if serverType == SERVER_XEN_API:
+        pools = server.xenapi.cpu_pool.get_all_records()
+        cpu_recs = server.xenapi.host_cpu.get_all_records()
+        sxprs = []
+        for pool in pools.values():
+            if pool['name_label'] in params or len(params) == 0:
+                started_VM_names = [['started_VM_names'] + [
+                    server.xenapi.VM.get_name_label(started_VM)
+                    for started_VM in pool['started_VMs'] ] ]
+                host_CPU_numbers = [['host_CPU_numbers'] + [
+                    cpu_recs[cpu_ref]['number']
+                    for cpu_ref in pool['host_CPUs'] ] ]
+                sxpr = [ pool['uuid'] ] + map_to_sxp(pool) + \
+                    host_CPU_numbers + started_VM_names
+                sxprs.append(sxpr)
+    else:
+        sxprs = server.xend.cpu_pool.list(params)
+
+    if len(params) > 0 and len(sxprs) == 0:
+        # pool not found
+        err("Pool '%s' does not exist." % params[0])
+
+    if use_long:
+        for sxpr in sxprs:
+            PrettyPrint.prettyprint(sxpr)
+    elif show_cpus:
+        brief_pool_list_cpus(sxprs)
+    else:
+        brief_pool_list(sxprs)
+
+def xm_pool_destroy(args):
+    arg_check(args, "pool-destroy", 1)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        server.xenapi.cpu_pool.deactivate(ref)
+    else:
+        server.xend.cpu_pool.destroy(args[0])
+
+def xm_pool_delete(args):
+    arg_check(args, "pool-delete", 1)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        server.xenapi.cpu_pool.destroy(ref)
+    else:
+        server.xend.cpu_pool.delete(args[0])
+
+def xm_pool_cpu_add(args):
+    arg_check(args, "pool-cpu-add", 2)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        cpu_ref_list = server.xenapi.host_cpu.get_all_records()
+        cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
+                                  if c_rec['number'] == args[1] ]
+        if len(cpu_ref) == 0:
+            err('cpu number unknown')
+        else:
+            server.xenapi.cpu_pool.add_host_CPU_live(ref, cpu_ref[0])
+    else:
+        server.xend.cpu_pool.cpu_add(args[0], args[1])
+
+def xm_pool_cpu_remove(args):
+    arg_check(args, "pool-cpu-remove", 2)
+    if serverType == SERVER_XEN_API:
+        ref = get_pool_ref(args[0])
+        cpu_ref_list = server.xenapi.host_cpu.get_all_records()
+        cpu_ref = [ c_rec['uuid'] for c_rec in cpu_ref_list.values()
+                                  if c_rec['number'] == args[1] ]
+        if len(cpu_ref) == 0:
+            err('cpu number unknown')
+        else:
+            server.xenapi.cpu_pool.remove_host_CPU_live(ref, cpu_ref[0])
+    else:
+        server.xend.cpu_pool.cpu_remove(args[0], args[1])
+
+def xm_pool_migrate(args):
+    arg_check(args, "pool-migrate", 2)
+    domname = args[0]
+    poolname = args[1]
+    if serverType == SERVER_XEN_API:
+        pool_ref = get_pool_ref(poolname)
+        server.xenapi.VM.cpu_pool_migrate(get_single_vm(domname), pool_ref)
+    else:
+        server.xend.cpu_pool.migrate(domname, poolname)
 
 
 commands = {
@@ -3615,6 +3917,14 @@ commands = {
     "usb-list-assignable-devices": xm_usb_list_assignable_devices,
     "usb-hc-create": xm_usb_hc_create,
     "usb-hc-destroy": xm_usb_hc_destroy,
+    # pool
+    "pool-start": xm_pool_start,
+    "pool-list": xm_pool_list,
+    "pool-destroy": xm_pool_destroy,
+    "pool-delete": xm_pool_delete,
+    "pool-cpu-add": xm_pool_cpu_add,
+    "pool-cpu-remove": xm_pool_cpu_remove,
+    "pool-migrate": xm_pool_migrate,
     # tmem
     "tmem-thaw": xm_tmem_thaw,
     "tmem-freeze": xm_tmem_freeze,
@@ -3646,6 +3956,8 @@ IMPORTED_COMMANDS = [
     'resetpolicy',
     'getenforce',
     'setenforce',
+    'pool-create',
+    'pool-new',
     ]
 
 for c in IMPORTED_COMMANDS:
diff -r fadf63ab49e7 tools/python/xen/xm/pool-create.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xm/pool-create.py        Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,51 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (C) 2009 Fujitsu Technology Solutions
+#============================================================================
+
+""" Create a new unmanaged pool.
+"""
+
+import sys
+from xen.xm.main import serverType, SERVER_XEN_API, server
+from xen.xm.pool import parseCommandLine, err, help as help_options
+from xen.util.sxputils import sxp2map
+
+def help():
+    return help_options()
+
+
+def main(argv):
+    try:
+        (opts, config) = parseCommandLine(argv)
+    except StandardError, ex:
+        err(str(ex))
+
+    if not opts:
+        return
+
+    if serverType == SERVER_XEN_API:
+        record = sxp2map(config)
+        if type(record.get('proposed_CPUs', [])) != list:
+            record['proposed_CPUs'] = [record['proposed_CPUs']]
+        ref = server.xenapi.cpu_pool.create(record)
+        if ref:
+            server.xenapi.cpu_pool.activate(ref)
+    else:
+        server.xend.cpu_pool.create(config)
+
+if __name__ == '__main__':
+    main(sys.argv)
+
diff -r fadf63ab49e7 tools/python/xen/xm/pool-new.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xm/pool-new.py   Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,50 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (C) 2009 Fujitsu Technology Solutions
+#============================================================================
+
+""" Create a new managed pool.
+"""
+
+import sys
+from xen.xm.main import serverType, SERVER_XEN_API, server
+from xen.xm.pool import parseCommandLine, err, help as help_options
+from xen.util.sxputils import sxp2map
+
+
+def help():
+    return help_options()
+
+
+def main(argv):
+    try:
+        (opts, config) = parseCommandLine(argv)
+    except StandardError, ex:
+        err(str(ex))
+
+    if not opts:
+        return
+
+    if serverType == SERVER_XEN_API:
+        record = sxp2map(config)
+        if type(record.get('proposed_CPUs', [])) != list:
+            record['proposed_CPUs'] = [record['proposed_CPUs']]
+        server.xenapi.cpu_pool.create(record)
+    else:
+        server.xend.cpu_pool.new(config)
+
+if __name__ == '__main__':
+    main(sys.argv)
+
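[Editorial note: both commands read the same config format. A hedged example
of a Python pool config file; variable names follow the GOPTS.var()
definitions in pool.py below, and the values are made up:

    # /etc/xen/pool/pool1 -- used as 'xm pool-create pool1'
    name  = "Pool-1"
    sched = "credit"
    cpus  = "2-4,^3"    # range/negation syntax parsed by make_cpus_config()
    other_config = []
]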
diff -r fadf63ab49e7 tools/python/xen/xm/pool.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/python/xen/xm/pool.py       Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,236 @@
+#============================================================================
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#============================================================================
+# Copyright (C) 2009 Fujitsu Technology Solutions
+#============================================================================
+
+""" Common function of cmds pool-new / pool-create.
+"""
+
+import sys
+import types
+import os
+
+from xen.xend import PrettyPrint
+from xen.xend import sxp
+
+from xen.xm.opts import Opts, set_value, set_true, append_value, OptionError
+
+GOPTS = Opts(use="""[options] [vars]
+
+Create a pool.
+
+Pool creation parameters can be set by command-line switches, from
+a python configuration script or an SXP config file. See documentation
+for --defconfig, --config. Configuration variables can be set using
+VAR=VAL on the command line. For example name=Pool-1 sets name to Pool-1.
+
+""")
+
+GOPTS.opt('help', short='h',
+          fn=set_true, default=0,
+          use="Print this help.")
+
+GOPTS.opt('help_config',
+          fn=set_true, default=0,
+          use="Print the available configuration variables (vars) for the "
+          "configuration script.")
+
+GOPTS.opt('path', val='PATH',
+          fn=set_value, default='.:/etc/xen/pool',
+          use="Search path for configuration scripts. "
+          "The value of PATH is a colon-separated directory list.")
+
+GOPTS.opt('defconfig', short='f', val='FILE',
+          fn=set_value, default='xmdefconfig',
+          use="Use the given Python configuration script."
+          "The configuration script is loaded after arguments have been "
+          "processed. Each command-line option sets a configuration "
+          "variable named after its long option name, and these "
+          "variables are placed in the environment of the script before "
+          "it is loaded. Variables for options that may be repeated have "
+          "list values. Other variables can be set using VAR=VAL on the "
+          "command line. "
+          "After the script is loaded, option values that were not set "
+          "on the command line are replaced by the values set in the script.")
+
+GOPTS.default('defconfig')
+
+GOPTS.opt('config', short='F', val='FILE',
+          fn=set_value, default=None,
+          use="CPU pool configuration to use (SXP).\n"
+          "SXP is the underlying configuration format used by Xen.\n"
+          "SXP configurations can be hand-written or generated from Python "
+          "configuration scripts, using the -n (dryrun) option to print "
+          "the configuration.")
+
+GOPTS.opt('dryrun', short='n',
+          fn=set_true, default=0,
+          use="Dry run - prints the resulting configuration in SXP but "
+          "does not create the CPU pool.")
+
+GOPTS.var('name', val='NAME', fn=set_value, default=None,
+          use="CPU pool name.")
+
+GOPTS.var('sched', val='SCHED', fn=set_value, default='credit',
+          use="Scheduler to use for the CPU pool.")
+
+GOPTS.var('cpus', val='CPUS', fn=set_value, default=1,
+          use="CPUS to assign to the CPU pool.")
+
+GOPTS.var('other_config', val='OTHER_CONFIG', fn=append_value, default=[],
+          use="Additional info for CPU pool")
+
+
+def sxp2map(sxp_val):
+    record = {}
+    for x in sxp_val:
+        if isinstance(x, (types.ListType, types.TupleType)) \
+           and len(x) > 1:
+            if isinstance(x[1], (types.ListType, types.TupleType)):
+                record[x[0]] = sxp2map(x[1])
+            else:
+                record[x[0]] = x[1]
+    return record
+
+def err(msg):
+    print >> sys.stderr, "Error: %s" % msg
+    sys.exit(-1)
+
+def make_cpus_config(cfg_cpus):
+    """ Taken from XendConfig. """
+    # Convert 'cpus' to list of list of ints
+
+    cpus_list = []
+    # Convert a string of the following form to a list of ints.
+    # The string supports a list of ranges (0-3),
+    # separated by commas, and negation (^1).
+    # Precedence is settled by order of the string:
+    #    "0-3,^1"      -> [0,2,3]
+    #    "0-3,^1,1"    -> [0,1,2,3]
+    def cnv(s):
+        l = []
+        for c in s.split(','):
+            if c.find('-') != -1:
+                (x, y) = c.split('-')
+                for i in range(int(x), int(y)+1):
+                    l.append(int(i))
+            else:
+                # remove this element from the list
+                if len(c) > 0:
+                    if c[0] == '^':
+                        l = [x for x in l if x != int(c[1:])]
+                    else:
+                        l.append(int(c))
+        return l
+
+    if type(cfg_cpus) == list:
+        if len(cfg_cpus) > 0 and type(cfg_cpus[0]) == list:
+            # If sxp_cfg was created from config.sxp,
+            # the form of 'cpus' is list of list of string.
+            # Convert 'cpus' to list of list of ints.
+            # Conversion examples:
+            #    [['1']]               -> [[1]]
+            #    [['0','2'],['1','3']] -> [[0,2],[1,3]]
+            try:
+                for c1 in cfg_cpus:
+                    cpus = []
+                    for c2 in c1:
+                        cpus.append(int(c2))
+                    cpus_list.append(cpus)
+            except ValueError, e:
+                err('cpus = %s: %s' % (cfg_cpus, e))
+        else:
+            # Conversion examples:
+            #    ["1"]               -> [[1]]
+            #    ["0,2","1,3"]       -> [[0,2],[1,3]]
+            #    ["0-3,^1","1-4,^2"] -> [[0,2,3],[1,3,4]]
+            try:
+                for c in cfg_cpus:
+                    cpus = cnv(c)
+                    cpus_list.append(cpus)
+            except ValueError, e:
+                err('cpus = %s: %s' % (cfg_cpus, e))
+    else:
+        # cfg_cpus is a single range string here; cnv() returns a
+        # flat list of ints.
+        # Conversion examples:
+        #    "1"      -> [1]
+        #    "0-3,^1" -> [0,2,3]
+        try:
+            cpus_list = cnv(cfg_cpus)
+        except ValueError, e:
+            err('cpus = %s: %s' % (cfg_cpus, e))
+    return cpus_list
+
+def make_config(vals):
+    config  = ['pool']
+    config += [['name_label', vals.name]]
+    config += [['sched_policy', vals.sched]]
+    if type(vals.cpus) == int:
+        config +=  [['ncpu', vals.cpus], ['proposed_CPUs' , []]]
+    elif type(vals.cpus) == str and len(vals.cpus) > 1 and vals.cpus[0] == '#':
+        try:
+            config +=  [['ncpu', int(vals.cpus[1:])], ['proposed_CPUs' , []]]
+        except ValueError, ex:
+            err('Illegal value for parameter "cpus"')
+    else:
+        prop_cpus = make_cpus_config(vals.cpus)
+        config +=  [['ncpu', len(prop_cpus)],
+                    ['proposed_CPUs'] + prop_cpus]
+    other_config = []
+    for entry in vals.other_config:
+        if '=' in entry:
+            (var, val) = entry.strip().split('=', 1)
+            other_config.append([var, val])
+    config +=  [['other_config'] + other_config]
+    return config
+
+def parseCommandLine(argv):
+    GOPTS.reset()
+    args = GOPTS.parse(argv)
+
+    if GOPTS.vals.help or GOPTS.vals.help_config:
+        if GOPTS.vals.help_config:
+            print GOPTS.val_usage()
+        return (None, None)
+
+    # Process remaining args as config variables.
+    for arg in args:
+        if '=' in arg:
+            (var, val) = arg.strip().split('=', 1)
+            GOPTS.setvar(var.strip(), val.strip())
+    if GOPTS.vals.config:
+        try:
+            config = sxp.parse(file(GOPTS.vals.config))[0]
+        except IOError, ex:
+            raise OptionError("Cannot read file %s: %s" % (GOPTS.vals.config, ex[1]))
+    else:
+        GOPTS.load_defconfig()
+        if not GOPTS.getopt('name') and GOPTS.getopt('defconfig'):
+            GOPTS.setopt('name', os.path.basename(
+                GOPTS.getopt('defconfig')))
+        config = make_config(GOPTS.vals)
+
+    if GOPTS.vals.dryrun:
+        PrettyPrint.prettyprint(config)
+        return (None, None)
+
+    return (GOPTS, config)
+
+def help():
+    return str(GOPTS)
+
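To make the cpus range syntax concrete, here is a standalone sketch of
the parser semantics (a re-statement of cnv() above for illustration
only, not the patch code; runs under python 2 and 3):

    def parse_cpu_ranges(s):
        # Comma-separated list of single cpus and ranges (lo-hi);
        # a ^N entry removes cpu N from the list built so far.
        cpus = []
        for chunk in s.split(','):
            if '-' in chunk:
                (lo, hi) = chunk.split('-')
                cpus.extend(range(int(lo), int(hi) + 1))
            elif chunk.startswith('^'):
                cpus = [c for c in cpus if c != int(chunk[1:])]
            elif chunk:
                cpus.append(int(chunk))
        return cpus

    assert parse_cpu_ranges("0-3,^1") == [0, 2, 3]
    assert sorted(parse_cpu_ranges("0-3,^1,1")) == [0, 1, 2, 3]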
diff -r fadf63ab49e7 tools/python/xen/xm/xenapi_create.py
--- a/tools/python/xen/xm/xenapi_create.py      Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/python/xen/xm/xenapi_create.py      Tue Apr 20 11:10:40 2010 +0200
@@ -310,6 +310,8 @@ class xenapi_create:
                 get_child_nodes_as_dict(vm, "platform", "key", "value"),
             "other_config":
                 get_child_nodes_as_dict(vm, "other_config", "key", "value"),
+            "pool_name":
+                vm.attributes["pool_name"].value,
             "PV_bootloader":
                 "",
             "PV_kernel":
@@ -696,6 +698,8 @@ class sxp2xml:
             = str(get_child_by_name(config, "s3_integrity", 0))
         vm.attributes["superpages"] \
             = str(get_child_by_name(config, "superpages", 0))
+        vm.attributes["pool_name"] \
+            = str(get_child_by_name(config, "pool_name", "Pool-0"))
 
         sec_data = get_child_by_name(config, "security")
         if sec_data:
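With the two hunks above, pool membership travels with the domain
configuration: sxp2xml copies a pool_name entry from the domain's SXP
into the VM element, and xenapi_create passes it through in the VM
record, defaulting to "Pool-0" when no entry is present. An
illustrative SXP fragment for a domain config (assuming pool_name is
parsed like its neighbours s3_integrity and superpages):

    (pool_name Pool-1)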
<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-devel] [Patch 4/6] Cpupools: python scripts, Juergen Gross <=