[Xen-changelog] [xen-unstable] cpupools [2/6]: libxc changes

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] cpupools [2/6]: libxc changes
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 22 Apr 2010 04:30:27 -0700
Delivery-date: Thu, 22 Apr 2010 04:31:22 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1271850540 -3600
# Node ID ce1003709cf07807da5a4868e7daa929443faef9
# Parent  de94884a669cb23a58c50555c3d9e7f41b847d2d
cpupools [2/6]: libxc changes

Signed-off-by: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
---
 tools/libxc/Makefile     |    1 
 tools/libxc/xc_cpupool.c |  154 +++++++++++++++++++++++++++++++++++++++++++++++
 tools/libxc/xc_domain.c  |    1 
 tools/libxc/xc_private.h |   13 +++
 tools/libxc/xenctrl.h    |   95 ++++++++++++++++++++++++++++
 5 files changed, 264 insertions(+)

diff -r de94884a669c -r ce1003709cf0 tools/libxc/Makefile
--- a/tools/libxc/Makefile      Wed Apr 21 12:48:03 2010 +0100
+++ b/tools/libxc/Makefile      Wed Apr 21 12:49:00 2010 +0100
@@ -8,6 +8,7 @@ CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-y       += xc_core.c
 CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
 CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
+CTRL_SRCS-y       += xc_cpupool.c
 CTRL_SRCS-y       += xc_domain.c
 CTRL_SRCS-y       += xc_evtchn.c
 CTRL_SRCS-y       += xc_misc.c
diff -r de94884a669c -r ce1003709cf0 tools/libxc/xc_cpupool.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_cpupool.c  Wed Apr 21 12:49:00 2010 +0100
@@ -0,0 +1,154 @@
+/******************************************************************************
+ * xc_cpupool.c
+ *
+ * API for manipulating and obtaining information on cpupools.
+ *
+ * Copyright (c) 2009, J Gross.
+ */
+
+#include <stdarg.h>
+#include "xc_private.h"
+
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id)
+{
+    int err;
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
+    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
+        XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid;
+    domctl.u.cpupool_op.sched_id = sched_id;
+    if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 )
+        return err;
+
+    *ppoolid = domctl.u.cpupool_op.cpupool_id;
+    return 0;
+}
+
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_getinfo(int xc_handle, 
+                       uint32_t first_poolid,
+                       uint32_t n_max, 
+                       xc_cpupoolinfo_t *info)
+{
+    int err = 0;
+    int p;
+    uint32_t poolid = first_poolid;
+    uint8_t local[sizeof (info->cpumap)];
+    DECLARE_DOMCTL;
+
+    memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+
+    for (p = 0; p < n_max; p++)
+    {
+        domctl.cmd = XEN_DOMCTL_cpupool_op;
+        domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO;
+        domctl.u.cpupool_op.cpupool_id = poolid;
+        set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+        domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
+
+        if ( (err = lock_pages(local, sizeof(local))) != 0 )
+        {
+            PERROR("Could not lock memory for Xen hypercall");
+            break;
+        }
+        err = do_domctl_save(xc_handle, &domctl);
+        unlock_pages(local, sizeof (local));
+
+        if ( err < 0 )
+            break;
+
+        info->cpupool_id = domctl.u.cpupool_op.cpupool_id;
+        info->sched_id = domctl.u.cpupool_op.sched_id;
+        info->n_dom = domctl.u.cpupool_op.n_dom;
+        bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
+        poolid = domctl.u.cpupool_op.cpupool_id + 1;
+        info++;
+    }
+
+    if ( p == 0 )
+        return err;
+
+    return p;
+}
+
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
+    domctl.u.cpupool_op.cpupool_id = poolid;
+    domctl.u.cpupool_op.domid = domid;
+    return do_domctl_save(xc_handle, &domctl);
+}
+
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap)
+{
+    int err;
+    uint8_t local[sizeof (*cpumap)];
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_cpupool_op;
+    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
+    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
+    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+
+    if ( (err = lock_pages(local, sizeof(local))) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        return err;
+    }
+
+    err = do_domctl_save(xc_handle, &domctl);
+    unlock_pages(local, sizeof (local));
+
+    if (err < 0)
+        return err;
+
+    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
+
+    return 0;
+}
diff -r de94884a669c -r ce1003709cf0 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Wed Apr 21 12:48:03 2010 +0100
+++ b/tools/libxc/xc_domain.c   Wed Apr 21 12:49:00 2010 +0100
@@ -220,6 +220,7 @@ int xc_domain_getinfo(int xc_handle,
         info->cpu_time = domctl.u.getdomaininfo.cpu_time;
         info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
         info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
+        info->cpupool = domctl.u.getdomaininfo.cpupool;
 
         memcpy(info->handle, domctl.u.getdomaininfo.handle,
                sizeof(xen_domain_handle_t));
diff -r de94884a669c -r ce1003709cf0 tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Wed Apr 21 12:48:03 2010 +0100
+++ b/tools/libxc/xc_private.h  Wed Apr 21 12:49:00 2010 +0100
@@ -161,6 +161,19 @@ static inline int do_domctl(int xc_handl
     hcall_buf_release((void **)&domctl, sizeof(*domctl));
 
  out1:
+    return ret;
+}
+
+static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
+{
+    int ret;
+
+    do
+    {
+        ret = do_domctl(xc_handle, domctl);
+    }
+    while ( (ret < 0 ) && (errno == EAGAIN) );
+
     return ret;
 }
 
diff -r de94884a669c -r ce1003709cf0 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Wed Apr 21 12:48:03 2010 +0100
+++ b/tools/libxc/xenctrl.h     Wed Apr 21 12:49:00 2010 +0100
@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
     unsigned int  nr_online_vcpus;
     unsigned int  max_vcpu_id;
     xen_domain_handle_t handle;
+    unsigned int  cpupool;
 } xc_dominfo_t;
 
 typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
@@ -508,6 +509,100 @@ int xc_domain_setdebugging(int xc_handle
 int xc_domain_setdebugging(int xc_handle,
                            uint32_t domid,
                            unsigned int enable);
+
+/*
+ * CPUPOOL MANAGEMENT FUNCTIONS
+ */
+
+typedef struct xc_cpupoolinfo {
+    uint32_t cpupool_id;
+    uint32_t sched_id;
+    uint32_t n_dom;
+    uint64_t cpumap;
+} xc_cpupoolinfo_t;
+
+/**
+ * Create a new cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm ppoolid pointer to the new cpupool id (in/out)
+ * @parm sched_id id of scheduler to use for pool
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_create(int xc_handle,
+                      uint32_t *ppoolid,
+                      uint32_t sched_id);
+
+/**
+ * Destroy a cpupool. Pool must be unused and have no cpu assigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool to destroy
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_destroy(int xc_handle,
+                       uint32_t poolid);
+
+/**
+ * Get cpupool info. Returns info for up to the specified number of cpupools
+ * starting at the given id.
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm first_poolid lowest id for which info is returned
+ * @parm n_max maximum number of cpupools to return info
+ * @parm info pointer to xc_cpupoolinfo_t array
+ * return number of cpupool infos
+ */
+int xc_cpupool_getinfo(int xc_handle,
+                       uint32_t first_poolid,
+                       uint32_t n_max,
+                       xc_cpupoolinfo_t *info);
+
+/**
+ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to add
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_addcpu(int xc_handle,
+                      uint32_t poolid,
+                      int cpu);
+
+/**
+ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the cpupool
+ * @parm cpu cpu number to remove
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_removecpu(int xc_handle,
+                         uint32_t poolid,
+                         int cpu);
+
+/**
+ * Move domain to another cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm poolid id of the destination cpupool
+ * @parm domid id of the domain to move
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_movedomain(int xc_handle,
+                          uint32_t poolid,
+                          uint32_t domid);
+
+/**
+ * Return map of cpus not in any cpupool.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface
+ * @parm cpumap pointer where to store the cpumap
+ * return 0 on success, -1 on failure
+ */
+int xc_cpupool_freeinfo(int xc_handle,
+                        uint64_t *cpumap);
+
 
 /*
  * EVENT CHANNEL FUNCTIONS

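[Editorial note, not part of the changeset: a minimal sketch of how a toolstack might drive the new libxc cpupool calls added above. It assumes the pre-4.1 libxc style used in this patch (xc_interface_open() returning an int handle) and the XEN_SCHEDULER_CREDIT constant from the public domctl headers; the domid and the array size are placeholders, and error handling is trimmed.]

    /* Sketch only: exercises the xc_cpupool_* API introduced by this patch. */
    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include "xenctrl.h"

    int main(void)
    {
        int xc = xc_interface_open();      /* pre-4.1 libxc: plain int handle */
        uint32_t poolid = 0;               /* 0 => let Xen pick a free pool id */
        uint64_t free_cpus;
        xc_cpupoolinfo_t info[8];          /* placeholder upper bound */
        int n, i;

        if ( xc < 0 )
            return 1;

        /* Create a pool using the credit scheduler, give it one free cpu,
         * and move a domain into it (domid 1 is a placeholder). */
        if ( xc_cpupool_create(xc, &poolid, XEN_SCHEDULER_CREDIT) == 0 )
        {
            xc_cpupool_addcpu(xc, poolid, -1);   /* -1 => first unassigned cpu */
            xc_cpupool_movedomain(xc, poolid, 1);
        }

        /* List up to 8 pools starting at pool id 0. */
        n = xc_cpupool_getinfo(xc, 0, 8, info);
        for ( i = 0; i < n; i++ )
            printf("pool %u: sched %u, %u domains, cpumap %#" PRIx64 "\n",
                   info[i].cpupool_id, info[i].sched_id,
                   info[i].n_dom, info[i].cpumap);

        /* Which physical cpus are not assigned to any pool? */
        if ( xc_cpupool_freeinfo(xc, &free_cpus) == 0 )
            printf("free cpus: %#" PRIx64 "\n", free_cpus);

        xc_interface_close(xc);
        return 0;
    }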
