[Xen-devel] [Patch 2/6] Cpupools: libxc part

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [Patch 2/6] Cpupools: libxc part
From: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
Date: Tue, 20 Apr 2010 11:39:15 +0200
Signed-off-by: juergen.gross@xxxxxxxxxxxxxx

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions              e-mail: juergen.gross@xxxxxxxxxxxxxx
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html

diff -r fadf63ab49e7 tools/libxc/Makefile
--- a/tools/libxc/Makefile      Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxc/Makefile      Tue Apr 20 11:10:40 2010 +0200
@@ -8,6 +8,7 @@ CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
 CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y       += xc_cpupool.c
 CTRL_SRCS-y       += xc_domain.c
 CTRL_SRCS-y       += xc_evtchn.c
 CTRL_SRCS-y       += xc_misc.c
diff -r fadf63ab49e7 tools/libxc/xc_cpupool.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpupool.c  Tue Apr 20 11:10:40 2010 +0200
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id)
+{
+    int err;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+        XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    domctl.u.cpupool_op.sched_id = sched_id;
+    if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+        return err;
+
+    *ppoolid = domctl.u.cpupool_op.cpupool_id;
+    return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle, 
+                       uint32_t first_poolid,
+                       uint32_t n_max, 
+                       xc_cpupoolinfo_t *info)
+{
+    int err = 0;
+    int p;
+    uint32_t poolid = first_poolid;
+    uint8_t local[sizeof (info->cpumap)];
+    DECLARE_DOMCTL;
+
+    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+    for (p = 0; p < n_max; p++)
+    {
+        domctl.cmd = XEN_DOMCTL_cpupool_op;
+        domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+        domctl.u.cpupool_op.cpupool_id = poolid;
+        set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+        domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+        if ( (err = lock_pages(local, sizeof(local))) != 0 )
+        {
+            PERROR("Could not lock memory for Xen hypercall");
+            break;
+        }
+        err = do_domctl_save(xc_handle, &domctl);
+        unlock_pages(local, sizeof (local));
+
+        if ( err < 0 )
+            break;
+
+        info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+        info->sched_id = domctl.u.cpupool_op.sched_id;
+        info->n_dom = domctl.u.cpupool_op.n_dom;
+        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+        poolid = domctl.u.cpupool_op.cpupool_id + 1;
+        info++;
+    }
+
+    if ( p == 0 )
+        return err;
+
+    return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.domid = domid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap)
+{
+    int err;
+    uint8_t local[sizeof (*cpumap)];
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        return err;
+    }
+
+    err = do_domctl_save(xc_handle, &domctl);
+    unlock_pages(local, sizeof (local));
+
+    if (err < 0)
+        return err;
+
+    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+    return 0;
+}
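
For context, a minimal caller sketch (not part of the patch) showing how these wrappers fit together: create a pool, hand it a free cpu, and read back its info. It assumes the current int-handle libxc interface (xc_interface_open()) and XEN_SCHEDULER_CREDIT from the public domctl header; error handling is abbreviated.

/* Hypothetical usage sketch, not part of the patch: create a cpupool on the
 * credit scheduler, add the first unassigned cpu, and print its info. */
#include <stdio.h>
#include "xenctrl.h"

int cpupool_demo(void)
{
    int xc = xc_interface_open();      /* int handle, as in current libxc */
    uint32_t poolid = 0;               /* 0: let Xen choose the pool id */
    xc_cpupoolinfo_t info;

    if ( xc < 0 )
        return -1;

    if ( xc_cpupool_create(xc, &poolid, XEN_SCHEDULER_CREDIT) ||
         xc_cpupool_addcpu(xc, poolid, -1) )       /* -1: first free cpu */
    {
        xc_interface_close(xc);
        return -1;
    }

    if ( xc_cpupool_getinfo(xc, poolid, 1, &info) == 1 )
        printf("pool %u: sched %u, %u domain(s), cpumap %#llx\n",
               info.cpupool_id, info.sched_id, info.n_dom,
               (unsigned long long)info.cpumap);

    xc_interface_close(xc);
    return 0;
}
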
diff -r fadf63ab49e7 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxc/xc_domain.c   Tue Apr 20 11:10:40 2010 +0200
@@ -6,6 +6,7 @@
  * Copyright (c) 2003, K A Fraser.
  */
 
+#include <stdarg.h>
 #include "xc_private.h"
 #include "xg_save_restore.h"
 #include <xen/memory.h>
@@ -15,15 +16,21 @@ int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
                      uint32_t flags,
-                     uint32_t *pdomid)
+                     uint32_t *pdomid, ...)
 {
     int err;
+    va_list ap;
     DECLARE_DOMCTL;
 
     domctl.cmd = XEN_DOMCTL_createdomain;
     domctl.domain = (domid_t)*pdomid;
     domctl.u.createdomain.ssidref = ssidref;
     domctl.u.createdomain.flags   = flags;
+    if ( flags & XEN_DOMCTL_CDF_pool ) {
+        va_start(ap, pdomid);
+        domctl.u.createdomain.cpupool = va_arg(ap, uint32_t);
+        va_end(ap);
+    }
     memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
     if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
         return err;
@@ -220,6 +227,7 @@ int xc_domain_getinfo(int xc_handle,
         info->cpu_time = domctl.u.getdomaininfo.cpu_time;
         info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
         info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+        info->cpupool = domctl.u.getdomaininfo.cpupool;
 
         memcpy(info->handle, domctl.u.getdomaininfo.handle,
                sizeof(xen_domain_handle_t));
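
A hypothetical caller sketch (not part of the patch) of the extended, variadic xc_domain_create(): when XEN_DOMCTL_CDF_pool (introduced by the hypervisor side of this series) is set in flags, the target cpupool id is passed as the extra trailing argument.

/* Hypothetical sketch, not part of the patch: create a domain directly in a
 * given cpupool by passing the pool id as the new trailing argument. */
#include "xenctrl.h"

int create_domain_in_pool(int xc, uint32_t poolid)
{
    uint32_t domid = 0;                    /* 0: let Xen choose the domid */
    xen_domain_handle_t handle = { 0 };    /* all-zero uuid for the sketch */

    if ( xc_domain_create(xc, 0 /* ssidref */, handle,
                          XEN_DOMCTL_CDF_pool, &domid, poolid) )
        return -1;

    return (int)domid;
}
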
diff -r fadf63ab49e7 tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxc/xc_private.h  Tue Apr 20 11:10:40 2010 +0200
@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
     return ret;
 }
 
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+    int ret;
+
+    do
+    {
+        ret = do_domctl(xc_handle, domctl);
+    }
+    while ( (ret < 0 ) && (errno == EAGAIN) );
+
+    return ret;
+}
+
 static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
 {
     int ret = -1;
diff -r fadf63ab49e7 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Mon Apr 19 17:57:28 2010 +0100
+++ b/tools/libxc/xenctrl.h     Tue Apr 20 11:10:40 2010 +0200
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
     unsigned int  nr_online_vcpus;
     unsigned int  max_vcpu_id;
     xen_domain_handle_t handle;
+    unsigned int  cpupool;
 } xc_dominfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -207,7 +208,7 @@ int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
                      uint32_t flags,
-                     uint32_t *pdomid);
+                     uint32_t *pdomid, ...);
 
 
 /* Functions to produce a dump of a given domain
@@ -483,6 +484,14 @@ int xc_sched_credit2_domain_get(int xc_h
                                uint32_t domid,
                                struct xen_domctl_sched_credit2 *sdom);
 
+int xc_sched_credit2_domain_set(int xc_handle,
+                               uint32_t domid,
+                               struct xen_domctl_sched_credit2 *sdom);
+
+int xc_sched_credit2_domain_get(int xc_handle,
+                               uint32_t domid,
+                               struct xen_domctl_sched_credit2 *sdom);
+
 /**
  * This function sends a trigger to a domain.
  *
@@ -508,6 +517,100 @@ int xc_domain_setdebugging(int xc_handle
 int xc_domain_setdebugging(int xc_handle,
                            uint32_t domid,
                            unsigned int enable);
+
+/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+    uint32_t cpupool_id;
+    uint32_t sched_id;
+    uint32_t n_dom;
+    uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools for which to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos returned (may be less than n_max), -1 on failure
+ */
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info);
+
+/**
+ * Add a cpu to a cpupool. cpu may be -1, indicating the first unassigned cpu.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu);
+
+/**
+ * Remove a cpu from a cpupool. cpu may be -1, indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer where to store the cpumap
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap);
+
 
 /*
  * EVENT CHANNEL FUNCTIONS
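
Finally, a hypothetical sketch (not part of the patch) pulling the new declarations together: enumerate all cpupools starting from pool 0 and report the cpus not assigned to any pool. The bound of 16 pools is arbitrary for the example.

/* Hypothetical sketch, not part of the patch: list all cpupools and the
 * cpus that are not assigned to any pool. */
#include <stdio.h>
#include "xenctrl.h"

#define DEMO_MAX_POOLS 16      /* arbitrary bound for this example */

int list_cpupools(int xc)
{
    xc_cpupoolinfo_t info[DEMO_MAX_POOLS];
    uint64_t free_map = 0;
    int i, n;

    n = xc_cpupool_getinfo(xc, 0, DEMO_MAX_POOLS, info);
    if ( n < 0 )
        return n;

    for ( i = 0; i < n; i++ )
        printf("pool %u: sched %u, %u domain(s), cpumap %#llx\n",
               info[i].cpupool_id, info[i].sched_id, info[i].n_dom,
               (unsigned long long)info[i].cpumap);

    if ( xc_cpupool_freeinfo(xc, &free_map) == 0 )
        printf("free cpus: %#llx\n", (unsigned long long)free_map);

    return 0;
}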