To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 14 of 24] libxc: convert sysctl interfaces over to hypercall buffers
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Mon, 06 Sep 2010 14:38:34 +0100
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Delivery-date: Mon, 06 Sep 2010 07:02:09 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1283780300@xxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1283779691 -3600
# Node ID 92e9795d06413325c84f2220cf33c0dd831e8355
# Parent  2a5e84fe718ae25e91785643388411b70d4c013b
libxc: convert sysctl interfaces over to hypercall buffers

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
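
For reference, the sketch below shows the shape of the conversion applied in each
hunk of this patch: the caller-supplied buffer is declared as a bounce buffer,
copied into hypercall-safe memory with xc_hypercall_bounce_pre() (which replaces
lock_pages()), handed to Xen through xc_set_xen_guest_handle(), and copied back
and released with xc_hypercall_bounce_post() (which replaces unlock_pages()).
The function xc_example_get_stats() and the sysctl sub-op "u.example" are invented
for illustration only; the macros and helpers are the ones used in the hunks below.

    /* Illustrative sketch only -- not part of the patch.
     * xc_example_get_stats() and sysctl.u.example are hypothetical. */
    #include "xc_private.h"

    int xc_example_get_stats(xc_interface *xch, uint32_t *stats, int nr_stats)
    {
        int ret;
        DECLARE_SYSCTL;
        /* Bounce the caller's buffer instead of mlock()ing it in place. */
        DECLARE_HYPERCALL_BOUNCE(stats, nr_stats * sizeof(*stats),
                                 XC_HYPERCALL_BUFFER_BOUNCE_OUT);

        if ( xc_hypercall_bounce_pre(xch, stats) )   /* was: lock_pages() */
            return -1;

        sysctl.cmd = XEN_SYSCTL_example;             /* hypothetical sub-op */
        sysctl.u.example.nr_stats = nr_stats;
        xc_set_xen_guest_handle(sysctl.u.example.buffer, stats);

        ret = do_sysctl(xch, &sysctl);

        xc_hypercall_bounce_post(xch, stats);        /* was: unlock_pages() */

        return ret;
    }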

diff -r 2a5e84fe718a -r 92e9795d0641 tools/libxc/xc_cpupool.c
--- a/tools/libxc/xc_cpupool.c  Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_cpupool.c  Mon Sep 06 14:28:11 2010 +0100
@@ -72,8 +72,14 @@ int xc_cpupool_getinfo(xc_interface *xch
     int err = 0;
     int p;
     uint32_t poolid = first_poolid;
-    uint8_t local[sizeof (info->cpumap)];
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+
+    local = xc_hypercall_buffer_alloc(xch, local, sizeof (info->cpumap));
+    if ( local == NULL ) {
+        PERROR("Could not allocate locked memory for Xen hypercall");
+        return -ENOMEM;
+    }
 
     memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
 
@@ -82,17 +88,10 @@ int xc_cpupool_getinfo(xc_interface *xch
         sysctl.cmd = XEN_SYSCTL_cpupool_op;
         sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
         sysctl.u.cpupool_op.cpupool_id = poolid;
-        set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+        xc_set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
         sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
 
-        if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
-        {
-            PERROR("Could not lock memory for Xen hypercall");
-            break;
-        }
         err = do_sysctl_save(xch, &sysctl);
-        unlock_pages(xch, local, sizeof (local));
-
         if ( err < 0 )
             break;
 
@@ -103,6 +102,8 @@ int xc_cpupool_getinfo(xc_interface *xch
         poolid = sysctl.u.cpupool_op.cpupool_id + 1;
         info++;
     }
+
+    xc_hypercall_buffer_free(xch, local);
 
     if ( p == 0 )
         return err;
@@ -153,27 +154,28 @@ int xc_cpupool_freeinfo(xc_interface *xc
                         uint64_t *cpumap)
 {
     int err;
-    uint8_t local[sizeof (*cpumap)];
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+
+    local = xc_hypercall_buffer_alloc(xch, local, sizeof (*cpumap));
+    if ( local == NULL ) {
+        PERROR("Could not allocate locked memory for Xen hypercall");
+        return -ENOMEM;
+    }
 
     sysctl.cmd = XEN_SYSCTL_cpupool_op;
     sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
-    set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+    xc_set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
     sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
 
-    if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        return err;
-    }
-
     err = do_sysctl_save(xch, &sysctl);
-    unlock_pages(xch, local, sizeof (local));
 
     if (err < 0)
         return err;
 
     bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
 
+    xc_hypercall_buffer_free(xch, local);
+
     return 0;
 }
diff -r 2a5e84fe718a -r 92e9795d0641 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_domain.c   Mon Sep 06 14:28:11 2010 +0100
@@ -245,21 +245,22 @@ int xc_domain_getinfolist(xc_interface *
 {
     int ret = 0;
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(info, max_domains*sizeof(*info), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
 
-    if ( lock_pages(xch, info, max_domains*sizeof(xc_domaininfo_t)) != 0 )
+    if ( xc_hypercall_bounce_pre(xch, info) )
         return -1;
 
     sysctl.cmd = XEN_SYSCTL_getdomaininfolist;
     sysctl.u.getdomaininfolist.first_domain = first_domain;
     sysctl.u.getdomaininfolist.max_domains  = max_domains;
-    set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info);
+    xc_set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info);
 
     if ( xc_sysctl(xch, &sysctl) < 0 )
         ret = -1;
     else
         ret = sysctl.u.getdomaininfolist.num_domains;
 
-    unlock_pages(xch, info, max_domains*sizeof(xc_domaininfo_t));
+    xc_hypercall_bounce_post(xch, info);
 
     return ret;
 }
diff -r 2a5e84fe718a -r 92e9795d0641 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c     Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_misc.c     Mon Sep 06 14:28:11 2010 +0100
@@ -27,11 +27,15 @@ int xc_readconsolering(xc_interface *xch
                        int clear, int incremental, uint32_t *pindex)
 {
     int ret;
+    unsigned int nr_chars = *pnr_chars;
     DECLARE_SYSCTL;
-    unsigned int nr_chars = *pnr_chars;
+    DECLARE_HYPERCALL_BOUNCE(buffer, nr_chars, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, buffer) )
+        return -1;
 
     sysctl.cmd = XEN_SYSCTL_readconsole;
-    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
+    xc_set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
     sysctl.u.readconsole.count = nr_chars;
     sysctl.u.readconsole.clear = clear;
     sysctl.u.readconsole.incremental = 0;
@@ -41,9 +45,6 @@ int xc_readconsolering(xc_interface *xch
         sysctl.u.readconsole.incremental = incremental;
     }
 
-    if ( (ret = lock_pages(xch, buffer, nr_chars)) != 0 )
-        return ret;
-
     if ( (ret = do_sysctl(xch, &sysctl)) == 0 )
     {
         *pnr_chars = sysctl.u.readconsole.count;
@@ -51,7 +52,7 @@ int xc_readconsolering(xc_interface *xch
             *pindex = sysctl.u.readconsole.index;
     }
 
-    unlock_pages(xch, buffer, nr_chars);
+    xc_hypercall_bounce_post(xch, buffer);
 
     return ret;
 }
@@ -60,17 +61,18 @@ int xc_send_debug_keys(xc_interface *xch
 {
     int ret, len = strlen(keys);
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(keys, len, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, keys) )
+        return -1;
 
     sysctl.cmd = XEN_SYSCTL_debug_keys;
-    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
+    xc_set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
     sysctl.u.debug_keys.nr_keys = len;
-
-    if ( (ret = lock_pages(xch, keys, len)) != 0 )
-        return ret;
 
     ret = do_sysctl(xch, &sysctl);
 
-    unlock_pages(xch, keys, len);
+    xc_hypercall_bounce_post(xch, keys);
 
     return ret;
 }
@@ -173,8 +175,8 @@ int xc_perfc_reset(xc_interface *xch)
 
     sysctl.cmd = XEN_SYSCTL_perfc_op;
     sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_reset;
-    set_xen_guest_handle(sysctl.u.perfc_op.desc, NULL);
-    set_xen_guest_handle(sysctl.u.perfc_op.val, NULL);
+    xc_set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
+    xc_set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);
 
     return do_sysctl(xch, &sysctl);
 }
@@ -188,8 +190,8 @@ int xc_perfc_query_number(xc_interface *
 
     sysctl.cmd = XEN_SYSCTL_perfc_op;
     sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
-    set_xen_guest_handle(sysctl.u.perfc_op.desc, NULL);
-    set_xen_guest_handle(sysctl.u.perfc_op.val, NULL);
+    xc_set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
+    xc_set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);
 
     rc = do_sysctl(xch, &sysctl);
 
@@ -202,15 +204,17 @@ int xc_perfc_query_number(xc_interface *
 }
 
 int xc_perfc_query(xc_interface *xch,
-                   xc_perfc_desc_t *desc,
-                   xc_perfc_val_t *val)
+                   struct xc_hypercall_buffer *desc,
+                   struct xc_hypercall_buffer *val)
 {
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(desc);
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(val);
 
     sysctl.cmd = XEN_SYSCTL_perfc_op;
     sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
-    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
-    set_xen_guest_handle(sysctl.u.perfc_op.val, val);
+    xc_set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
+    xc_set_xen_guest_handle(sysctl.u.perfc_op.val, val);
 
     return do_sysctl(xch, &sysctl);
 }
@@ -221,7 +225,7 @@ int xc_lockprof_reset(xc_interface *xch)
 
     sysctl.cmd = XEN_SYSCTL_lockprof_op;
     sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_reset;
-    set_xen_guest_handle(sysctl.u.lockprof_op.data, NULL);
+    xc_set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);
 
     return do_sysctl(xch, &sysctl);
 }
@@ -234,7 +238,7 @@ int xc_lockprof_query_number(xc_interfac
 
     sysctl.cmd = XEN_SYSCTL_lockprof_op;
     sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
-    set_xen_guest_handle(sysctl.u.lockprof_op.data, NULL);
+    xc_set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);
 
     rc = do_sysctl(xch, &sysctl);
 
@@ -244,17 +248,18 @@ int xc_lockprof_query_number(xc_interfac
 }
 
 int xc_lockprof_query(xc_interface *xch,
-                        uint32_t *n_elems,
-                        uint64_t *time,
-                        xc_lockprof_data_t *data)
+                      uint32_t *n_elems,
+                      uint64_t *time,
+                      struct xc_hypercall_buffer *data)
 {
     int rc;
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(data);
 
     sysctl.cmd = XEN_SYSCTL_lockprof_op;
     sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
     sysctl.u.lockprof_op.max_elem = *n_elems;
-    set_xen_guest_handle(sysctl.u.lockprof_op.data, data);
+    xc_set_xen_guest_handle(sysctl.u.lockprof_op.data, data);
 
     rc = do_sysctl(xch, &sysctl);
 
@@ -268,20 +273,21 @@ int xc_getcpuinfo(xc_interface *xch, int
 {
     int rc;
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(info, max_cpus*sizeof(*info), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, info) )
+        return -1;
 
     sysctl.cmd = XEN_SYSCTL_getcpuinfo;
-    sysctl.u.getcpuinfo.max_cpus = max_cpus; 
-    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info); 
-
-    if ( (rc = lock_pages(xch, info, max_cpus*sizeof(*info))) != 0 )
-        return rc;
+    sysctl.u.getcpuinfo.max_cpus = max_cpus;
+    xc_set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);
 
     rc = do_sysctl(xch, &sysctl);
 
-    unlock_pages(xch, info, max_cpus*sizeof(*info));
+    xc_hypercall_bounce_post(xch, info);
 
     if ( nr_cpus )
-        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus; 
+        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;
 
     return rc;
 }
diff -r 2a5e84fe718a -r 92e9795d0641 tools/libxc/xc_offline_page.c
--- a/tools/libxc/xc_offline_page.c     Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_offline_page.c     Mon Sep 06 14:28:11 2010 +0100
@@ -66,12 +66,13 @@ int xc_mark_page_online(xc_interface *xc
                         unsigned long end, uint32_t *status)
 {
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
     int ret = -1;
 
     if ( !status || (end < start) )
         return -EINVAL;
 
-    if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)))
+    if ( xc_hypercall_bounce_pre(xch, status) )
     {
         ERROR("Could not lock memory for xc_mark_page_online\n");
         return -EINVAL;
@@ -81,10 +82,10 @@ int xc_mark_page_online(xc_interface *xc
     sysctl.u.page_offline.start = start;
     sysctl.u.page_offline.cmd = sysctl_page_online;
     sysctl.u.page_offline.end = end;
-    set_xen_guest_handle(sysctl.u.page_offline.status, status);
+    xc_set_xen_guest_handle(sysctl.u.page_offline.status, status);
     ret = xc_sysctl(xch, &sysctl);
 
-    unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1));
+    xc_hypercall_bounce_post(xch, status);
 
     return ret;
 }
@@ -93,12 +94,13 @@ int xc_mark_page_offline(xc_interface *x
                           unsigned long end, uint32_t *status)
 {
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
     int ret = -1;
 
     if ( !status || (end < start) )
         return -EINVAL;
 
-    if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)))
+    if ( xc_hypercall_bounce_pre(xch, status) )
     {
         ERROR("Could not lock memory for xc_mark_page_offline");
         return -EINVAL;
@@ -108,10 +110,10 @@ int xc_mark_page_offline(xc_interface *x
     sysctl.u.page_offline.start = start;
     sysctl.u.page_offline.cmd = sysctl_page_offline;
     sysctl.u.page_offline.end = end;
-    set_xen_guest_handle(sysctl.u.page_offline.status, status);
+    xc_set_xen_guest_handle(sysctl.u.page_offline.status, status);
     ret = xc_sysctl(xch, &sysctl);
 
-    unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1));
+    xc_hypercall_bounce_post(xch, status);
 
     return ret;
 }
@@ -120,12 +122,13 @@ int xc_query_page_offline_status(xc_inte
                                  unsigned long end, uint32_t *status)
 {
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BOUNCE(status, sizeof(uint32_t)*(end - start + 1), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
     int ret = -1;
 
     if ( !status || (end < start) )
         return -EINVAL;
 
-    if (lock_pages(xch, status, sizeof(uint32_t)*(end - start + 1)))
+    if ( xc_hypercall_bounce_pre(xch, status) )
     {
         ERROR("Could not lock memory for xc_query_page_offline_status\n");
         return -EINVAL;
@@ -135,10 +138,10 @@ int xc_query_page_offline_status(xc_inte
     sysctl.u.page_offline.start = start;
     sysctl.u.page_offline.cmd = sysctl_query_page_offline;
     sysctl.u.page_offline.end = end;
-    set_xen_guest_handle(sysctl.u.page_offline.status, status);
+    xc_set_xen_guest_handle(sysctl.u.page_offline.status, status);
     ret = xc_sysctl(xch, &sysctl);
 
-    unlock_pages(xch, status, sizeof(uint32_t)*(end - start + 1));
+    xc_hypercall_bounce_post(xch, status);
 
     return ret;
 }
diff -r 2a5e84fe718a -r 92e9795d0641 tools/libxc/xc_pm.c
--- a/tools/libxc/xc_pm.c       Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_pm.c       Mon Sep 06 14:28:11 2010 +0100
@@ -45,6 +45,10 @@ int xc_pm_get_pxstat(xc_interface *xch, 
 int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt)
 {
     DECLARE_SYSCTL;
+    /* Sizes unknown until xc_pm_get_max_px */
+    DECLARE_NAMED_HYPERCALL_BOUNCE(trans, &pxpt->trans_pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(pt, &pxpt->pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
     int max_px, ret;
 
     if ( !pxpt || !(pxpt->trans_pt) || !(pxpt->pt) )
@@ -53,14 +57,15 @@ int xc_pm_get_pxstat(xc_interface *xch, 
     if ( (ret = xc_pm_get_max_px(xch, cpuid, &max_px)) != 0)
         return ret;
 
-    if ( (ret = lock_pages(xch, pxpt->trans_pt, 
-        max_px * max_px * sizeof(uint64_t))) != 0 )
+    HYPERCALL_BOUNCE_SET_SIZE(trans, max_px * max_px * sizeof(uint64_t));
+    HYPERCALL_BOUNCE_SET_SIZE(pt, max_px * sizeof(struct xc_px_val));
+
+    if ( xc_hypercall_bounce_pre(xch, trans) )
         return ret;
 
-    if ( (ret = lock_pages(xch, pxpt->pt, 
-        max_px * sizeof(struct xc_px_val))) != 0 )
+    if ( xc_hypercall_bounce_pre(xch, pt) )
     {
-        unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
+        xc_hypercall_bounce_post(xch, trans);
         return ret;
     }
 
@@ -68,15 +73,14 @@ int xc_pm_get_pxstat(xc_interface *xch, 
     sysctl.u.get_pmstat.type = PMSTAT_get_pxstat;
     sysctl.u.get_pmstat.cpuid = cpuid;
     sysctl.u.get_pmstat.u.getpx.total = max_px;
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.trans_pt, pxpt->trans_pt);
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.pt, 
-                        (pm_px_val_t *)pxpt->pt);
+    xc_set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.trans_pt, trans);
+    xc_set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.pt, pt);
 
     ret = xc_sysctl(xch, &sysctl);
     if ( ret )
     {
-        unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
-        unlock_pages(xch, pxpt->pt, max_px * sizeof(struct xc_px_val));
+        xc_hypercall_bounce_post(xch, trans);
+        xc_hypercall_bounce_post(xch, pt);
         return ret;
     }
 
@@ -85,8 +89,8 @@ int xc_pm_get_pxstat(xc_interface *xch, 
     pxpt->last = sysctl.u.get_pmstat.u.getpx.last;
     pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur;
 
-    unlock_pages(xch, pxpt->trans_pt, max_px * max_px * sizeof(uint64_t));
-    unlock_pages(xch, pxpt->pt, max_px * sizeof(struct xc_px_val));
+    xc_hypercall_bounce_post(xch, trans);
+    xc_hypercall_bounce_post(xch, pt);
 
     return ret;
 }
@@ -120,6 +124,8 @@ int xc_pm_get_cxstat(xc_interface *xch, 
 int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt)
 {
     DECLARE_SYSCTL;
+    DECLARE_NAMED_HYPERCALL_BOUNCE(triggers, &cxpt->triggers, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(residencies, &cxpt->residencies, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
     int max_cx, ret;
 
     if( !cxpt || !(cxpt->triggers) || !(cxpt->residencies) )
@@ -128,22 +134,23 @@ int xc_pm_get_cxstat(xc_interface *xch, 
     if ( (ret = xc_pm_get_max_cx(xch, cpuid, &max_cx)) )
         goto unlock_0;
 
-    if ( (ret = lock_pages(xch, cxpt, sizeof(struct xc_cx_stat))) )
+    HYPERCALL_BOUNCE_SET_SIZE(triggers, max_cx * sizeof(uint64_t));
+    HYPERCALL_BOUNCE_SET_SIZE(residencies, max_cx * sizeof(uint64_t));
+
+    ret = -1;
+    if ( xc_hypercall_bounce_pre(xch, triggers) )
         goto unlock_0;
-    if ( (ret = lock_pages(xch, cxpt->triggers, max_cx * sizeof(uint64_t))) )
+    if ( xc_hypercall_bounce_pre(xch, residencies) )
         goto unlock_1;
-    if ( (ret = lock_pages(xch, cxpt->residencies, max_cx * sizeof(uint64_t))) )
-        goto unlock_2;
 
     sysctl.cmd = XEN_SYSCTL_get_pmstat;
     sysctl.u.get_pmstat.type = PMSTAT_get_cxstat;
     sysctl.u.get_pmstat.cpuid = cpuid;
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.triggers, cxpt->triggers);
-    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.residencies, 
-                         cxpt->residencies);
+    xc_set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.triggers, triggers);
+    xc_set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.residencies, residencies);
 
     if ( (ret = xc_sysctl(xch, &sysctl)) )
-        goto unlock_3;
+        goto unlock_2;
 
     cxpt->nr = sysctl.u.get_pmstat.u.getcx.nr;
     cxpt->last = sysctl.u.get_pmstat.u.getcx.last;
@@ -154,12 +161,10 @@ int xc_pm_get_cxstat(xc_interface *xch, 
     cxpt->cc3 = sysctl.u.get_pmstat.u.getcx.cc3;
     cxpt->cc6 = sysctl.u.get_pmstat.u.getcx.cc6;
 
-unlock_3:
-    unlock_pages(xch, cxpt->residencies, max_cx * sizeof(uint64_t));
 unlock_2:
-    unlock_pages(xch, cxpt->triggers, max_cx * sizeof(uint64_t));
+    xc_hypercall_bounce_post(xch, residencies);
 unlock_1:
-    unlock_pages(xch, cxpt, sizeof(struct xc_cx_stat));
+    xc_hypercall_bounce_post(xch, triggers);
 unlock_0:
     return ret;
 }
@@ -186,12 +191,19 @@ int xc_get_cpufreq_para(xc_interface *xc
     DECLARE_SYSCTL;
     int ret = 0;
     struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
+    DECLARE_NAMED_HYPERCALL_BOUNCE(affected_cpus,
+                        user_para->affected_cpus,
+                        user_para->cpu_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_frequencies,
+                        user_para->scaling_available_frequencies,
+                        user_para->freq_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_governors,
+                        user_para->scaling_available_governors,
+                        user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
     bool has_num = user_para->cpu_num &&
                      user_para->freq_num &&
                      user_para->gov_num;
-
-    if ( (xch < 0) || !user_para )
-        return -EINVAL;
 
     if ( has_num )
     {
@@ -200,22 +212,16 @@ int xc_get_cpufreq_para(xc_interface *xc
              (!user_para->scaling_available_governors) )
             return -EINVAL;
 
-        if ( (ret = lock_pages(xch, user_para->affected_cpus,
-                               user_para->cpu_num * sizeof(uint32_t))) )
+        if ( xc_hypercall_bounce_pre(xch, affected_cpus) )
             goto unlock_1;
-        if ( (ret = lock_pages(xch, user_para->scaling_available_frequencies,
-                               user_para->freq_num * sizeof(uint32_t))) )
+        if ( xc_hypercall_bounce_pre(xch, scaling_available_frequencies) )
             goto unlock_2;
-        if ( (ret = lock_pages(xch, user_para->scaling_available_governors,
-                 user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char))) )
+        if ( xc_hypercall_bounce_pre(xch, scaling_available_governors) )
             goto unlock_3;
 
-        set_xen_guest_handle(sys_para->affected_cpus,
-                             user_para->affected_cpus);
-        set_xen_guest_handle(sys_para->scaling_available_frequencies,
-                             user_para->scaling_available_frequencies);
-        set_xen_guest_handle(sys_para->scaling_available_governors,
-                             user_para->scaling_available_governors);
+        xc_set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
+        xc_set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
+        xc_set_xen_guest_handle(sys_para->scaling_available_frequencies, scaling_available_frequencies);
+        xc_set_xen_guest_handle(sys_para->scaling_available_governors, scaling_available_governors);
     }
 
     sysctl.cmd = XEN_SYSCTL_pm_op;
@@ -250,7 +256,7 @@ int xc_get_cpufreq_para(xc_interface *xc
         user_para->scaling_min_freq = sys_para->scaling_min_freq;
         user_para->turbo_enabled    = sys_para->turbo_enabled;
 
-        memcpy(user_para->scaling_driver, 
+        memcpy(user_para->scaling_driver,
                 sys_para->scaling_driver, CPUFREQ_NAME_LEN);
         memcpy(user_para->scaling_governor,
                 sys_para->scaling_governor, CPUFREQ_NAME_LEN);
@@ -263,14 +269,11 @@ int xc_get_cpufreq_para(xc_interface *xc
     }
 
 unlock_4:
-    unlock_pages(xch, user_para->scaling_available_governors,
-                 user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char));
+    xc_hypercall_bounce_post(xch, scaling_available_governors);
 unlock_3:
-    unlock_pages(xch, user_para->scaling_available_frequencies,
-                 user_para->freq_num * sizeof(uint32_t));
+    xc_hypercall_bounce_post(xch, scaling_available_frequencies);
 unlock_2:
-    unlock_pages(xch, user_para->affected_cpus,
-                 user_para->cpu_num * sizeof(uint32_t));
+    xc_hypercall_bounce_post(xch, affected_cpus);
 unlock_1:
     return ret;
 }
diff -r 2a5e84fe718a -r 92e9795d0641 tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_private.h  Mon Sep 06 14:28:11 2010 +0100
@@ -238,18 +238,18 @@ static inline int do_sysctl(xc_interface
 {
     int ret = -1;
     DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BOUNCE(sysctl, sizeof(*sysctl), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
 
-    if ( hcall_buf_prep(xch, (void **)&sysctl, sizeof(*sysctl)) != 0 )
+    sysctl->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
+
+    if ( xc_hypercall_bounce_pre(xch, sysctl) )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out1;
     }
 
-    sysctl->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
-
     hypercall.op     = __HYPERVISOR_sysctl;
-    hypercall.arg[0] = (unsigned long)sysctl;
-
+    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(sysctl);
     if ( (ret = do_xen_hypercall(xch, &hypercall)) < 0 )
     {
         if ( errno == EACCES )
@@ -257,8 +257,7 @@ static inline int do_sysctl(xc_interface
                     " rebuild the user-space tool set?\n");
     }
 
-    hcall_buf_release(xch, (void **)&sysctl, sizeof(*sysctl));
-
+    xc_hypercall_bounce_post(xch, sysctl);
  out1:
     return ret;
 }
diff -r 2a5e84fe718a -r 92e9795d0641 tools/libxc/xc_tbuf.c
--- a/tools/libxc/xc_tbuf.c     Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_tbuf.c     Mon Sep 06 14:28:11 2010 +0100
@@ -116,9 +116,16 @@ int xc_tbuf_set_cpu_mask(xc_interface *x
 int xc_tbuf_set_cpu_mask(xc_interface *xch, uint32_t mask)
 {
     DECLARE_SYSCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, bytemap);
     int ret = -1;
     uint64_t mask64 = mask;
-    uint8_t bytemap[sizeof(mask64)];
+
+    bytemap = xc_hypercall_buffer_alloc(xch, bytemap, sizeof(mask64));
+    if ( bytemap == NULL )
+    {
+        PERROR("Could not allocate locked memory for Xen hypercall");
+        goto out;
+    }
 
     sysctl.cmd = XEN_SYSCTL_tbuf_op;
     sysctl.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
@@ -126,18 +133,12 @@ int xc_tbuf_set_cpu_mask(xc_interface *x
 
     bitmap_64_to_byte(bytemap, &mask64, sizeof (mask64) * 8);
 
-    set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
+    xc_set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, bytemap);
     sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(bytemap) * 8;
-
-    if ( lock_pages(xch, &bytemap, sizeof(bytemap)) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out;
-    }
 
     ret = do_sysctl(xch, &sysctl);
 
-    unlock_pages(xch, &bytemap, sizeof(bytemap));
+    xc_hypercall_buffer_free(xch, bytemap);
 
  out:
     return ret;
diff -r 2a5e84fe718a -r 92e9795d0641 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xenctrl.h     Mon Sep 06 14:28:11 2010 +0100
@@ -996,21 +996,18 @@ int xc_perfc_query_number(xc_interface *
 int xc_perfc_query_number(xc_interface *xch,
                           int *nbr_desc,
                           int *nbr_val);
-/* IMPORTANT: The caller is responsible for mlock()'ing the @desc and @val
-   arrays. */
 int xc_perfc_query(xc_interface *xch,
-                   xc_perfc_desc_t *desc,
-                   xc_perfc_val_t *val);
+                   xc_hypercall_buffer_t *desc,
+                   xc_hypercall_buffer_t *val);
 
 typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
 int xc_lockprof_reset(xc_interface *xch);
 int xc_lockprof_query_number(xc_interface *xch,
                              uint32_t *n_elems);
-/* IMPORTANT: The caller is responsible for mlock()'ing the @data array. */
 int xc_lockprof_query(xc_interface *xch,
                       uint32_t *n_elems,
                       uint64_t *time,
-                      xc_lockprof_data_t *data);
+                      xc_hypercall_buffer_t *data);
 
 /**
  * Memory maps a range within one domain to a local address range.  Mappings
diff -r 2a5e84fe718a -r 92e9795d0641 tools/misc/xenlockprof.c
--- a/tools/misc/xenlockprof.c  Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/misc/xenlockprof.c  Mon Sep 06 14:28:11 2010 +0100
@@ -18,22 +18,6 @@
 #include <string.h>
 #include <inttypes.h>
 
-static int lock_pages(void *addr, size_t len)
-{
-    int e = 0;
-#ifndef __sun__
-    e = mlock(addr, len);
-#endif
-    return (e);
-}
-
-static void unlock_pages(void *addr, size_t len)
-{
-#ifndef __sun__
-    munlock(addr, len);
-#endif
-}
-
 int main(int argc, char *argv[])
 {
     xc_interface      *xc_handle;
@@ -41,7 +25,7 @@ int main(int argc, char *argv[])
     uint64_t           time;
     double             l, b, sl, sb;
     char               name[60];
-    xc_lockprof_data_t *data;
+    DECLARE_HYPERCALL_BUFFER(xc_lockprof_data_t, data);
 
     if ( (argc > 2) || ((argc == 2) && (strcmp(argv[1], "-r") != 0)) )
     {
@@ -78,8 +62,8 @@ int main(int argc, char *argv[])
     }
 
     n += 32;    /* just to be sure */
-    data = malloc(sizeof(*data) * n);
-    if ( (data == NULL) || (lock_pages(data, sizeof(*data) * n) != 0) )
+    data = xc_hypercall_buffer_alloc(xc_handle, data, sizeof(*data) * n);
+    if ( data == NULL )
     {
         fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n",
                 errno, strerror(errno));
@@ -87,14 +71,12 @@ int main(int argc, char *argv[])
     }
 
     i = n;
-    if ( xc_lockprof_query(xc_handle, &i, &time, data) != 0 )
+    if ( xc_lockprof_query(xc_handle, &i, &time, HYPERCALL_BUFFER(data)) != 0 )
     {
         fprintf(stderr, "Error getting profile records: %d (%s)\n",
                 errno, strerror(errno));
         return 1;
     }
-
-    unlock_pages(data, sizeof(*data) * n);
 
     if ( i > n )
     {
@@ -132,5 +114,7 @@ int main(int argc, char *argv[])
     printf("total locked time:    %20.9fs\n", sl);
     printf("total blocked time:   %20.9fs\n", sb);
 
+    xc_hypercall_buffer_free(xc_handle, data);
+
     return 0;
 }
diff -r 2a5e84fe718a -r 92e9795d0641 tools/misc/xenperf.c
--- a/tools/misc/xenperf.c      Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/misc/xenperf.c      Mon Sep 06 14:28:11 2010 +0100
@@ -68,28 +68,12 @@ const char *hypercall_name_table[64] =
 };
 #undef X
 
-static int lock_pages(void *addr, size_t len)
-{
-    int e = 0;
-#ifndef __sun__
-    e = mlock(addr, len);
-#endif
-    return (e);
-}
-
-static void unlock_pages(void *addr, size_t len)
-{
-#ifndef __sun__
-    munlock(addr, len);
-#endif
-}
-
 int main(int argc, char *argv[])
 {
     int              i, j;
     xc_interface    *xc_handle;
-    xc_perfc_desc_t *pcd;
-    xc_perfc_val_t  *pcv;
+    DECLARE_HYPERCALL_BUFFER(xc_perfc_desc_t, pcd);
+    DECLARE_HYPERCALL_BUFFER(xc_perfc_val_t, pcv);
     xc_perfc_val_t  *val;
     int num_desc, num_val;
     unsigned int    sum, reset = 0, full = 0, pretty = 0;
@@ -154,28 +138,22 @@ int main(int argc, char *argv[])
         return 1;
     }
 
-    pcd = malloc(sizeof(*pcd) * num_desc);
-    pcv = malloc(sizeof(*pcv) * num_val);
+    pcd = xc_hypercall_buffer_alloc(xc_handle, pcd, sizeof(*pcd) * num_desc);
+    pcv = xc_hypercall_buffer_alloc(xc_handle, pcv, sizeof(*pcv) * num_val);
 
-    if ( pcd == NULL
-         || lock_pages(pcd, sizeof(*pcd) * num_desc) != 0
-         || pcv == NULL
-         || lock_pages(pcv, sizeof(*pcv) * num_val) != 0)
+    if ( pcd == NULL || pcv == NULL)
     {
         fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n",
                 errno, strerror(errno));
         exit(-1);
     }
 
-    if ( xc_perfc_query(xc_handle, pcd, pcv) != 0 )
+    if ( xc_perfc_query(xc_handle, HYPERCALL_BUFFER(pcd), HYPERCALL_BUFFER(pcv)) != 0 )
     {
         fprintf(stderr, "Error getting perf counter: %d (%s)\n",
                 errno, strerror(errno));
         return 1;
     }
-
-    unlock_pages(pcd, sizeof(*pcd) * num_desc);
-    unlock_pages(pcv, sizeof(*pcv) * num_val);
 
     val = pcv;
     for ( i = 0; i < num_desc; i++ )
@@ -221,5 +199,7 @@ int main(int argc, char *argv[])
         val += pcd[i].nr_vals;
     }
 
+    xc_hypercall_buffer_free(xc_handle, pcd);
+    xc_hypercall_buffer_free(xc_handle, pcv);
     return 0;
 }

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
