WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [PATCH 03 of 25] libxc: convert domctl interfaces over to hypercall buffers

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 03 of 25] libxc: convert domctl interfaces over to hypercall buffers
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Thu, 21 Oct 2010 11:58:46 +0100
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
Delivery-date: Thu, 21 Oct 2010 04:06:09 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <patchbomb.1287658723@xxxxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1287650254 -3600
# Node ID 6766a5b07735888ae5c5fdc16cbb4a915a997f05
# Parent  f262590f9b94d9f6da603082748fe9f560becc7d
libxc: convert domctl interfaces over to hypercall buffers

(defer save/restore and shadow related interfaces til a later patch)

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

diff -r f262590f9b94 -r 6766a5b07735 tools/libxc/xc_dom_boot.c
--- a/tools/libxc/xc_dom_boot.c Thu Oct 21 09:37:34 2010 +0100
+++ b/tools/libxc/xc_dom_boot.c Thu Oct 21 09:37:34 2010 +0100
@@ -61,9 +61,10 @@ static int setup_hypercall_page(struct x
     return rc;
 }
 
-static int launch_vm(xc_interface *xch, domid_t domid, void *ctxt)
+static int launch_vm(xc_interface *xch, domid_t domid, xc_hypercall_buffer_t *ctxt)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(ctxt);
     int rc;
 
     xc_dom_printf(xch, "%s: called, ctxt=%p", __FUNCTION__, ctxt);
@@ -71,7 +72,7 @@ static int launch_vm(xc_interface *xch, 
     domctl.cmd = XEN_DOMCTL_setvcpucontext;
     domctl.domain = domid;
     domctl.u.vcpucontext.vcpu = 0;
-    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
+    xc_set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
     rc = do_domctl(xch, &domctl);
     if ( rc != 0 )
         xc_dom_panic(xch, XC_INTERNAL_ERROR,
@@ -202,8 +203,12 @@ int xc_dom_boot_image(struct xc_dom_imag
 int xc_dom_boot_image(struct xc_dom_image *dom)
 {
     DECLARE_DOMCTL;
-    vcpu_guest_context_any_t ctxt;
+    DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
     int rc;
+
+    ctxt = xc_hypercall_buffer_alloc(dom->xch, ctxt, sizeof(*ctxt));
+    if ( ctxt == NULL )
+        return -1;
 
     DOMPRINTF_CALLED(dom->xch);
 
@@ -260,12 +265,13 @@ int xc_dom_boot_image(struct xc_dom_imag
         return rc;
 
     /* let the vm run */
-    memset(&ctxt, 0, sizeof(ctxt));
-    if ( (rc = dom->arch_hooks->vcpu(dom, &ctxt)) != 0 )
+    memset(ctxt, 0, sizeof(ctxt));
+    if ( (rc = dom->arch_hooks->vcpu(dom, ctxt)) != 0 )
         return rc;
     xc_dom_unmap_all(dom);
-    rc = launch_vm(dom->xch, dom->guest_domid, &ctxt);
+    rc = launch_vm(dom->xch, dom->guest_domid, HYPERCALL_BUFFER(ctxt));
 
+    xc_hypercall_buffer_free(dom->xch, ctxt);
     return rc;
 }
 
diff -r f262590f9b94 -r 6766a5b07735 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Thu Oct 21 09:37:34 2010 +0100
+++ b/tools/libxc/xc_domain.c   Thu Oct 21 09:37:34 2010 +0100
@@ -115,36 +115,31 @@ int xc_vcpu_setaffinity(xc_interface *xc
                         uint64_t *cpumap, int cpusize)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
     int ret = -1;
-    uint8_t *local = malloc(cpusize); 
 
-    if(local == NULL)
+    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
+    if ( local == NULL )
     {
-        PERROR("Could not alloc memory for Xen hypercall");
+        PERROR("Could not allocate memory for setvcpuaffinity domctl hypercall");
         goto out;
     }
+
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu    = vcpu;
 
     bitmap_64_to_byte(local, cpumap, cpusize * 8);
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+    xc_set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
 
     domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(xch, local, cpusize) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out;
-    }
 
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(xch, local, cpusize);
+    xc_hypercall_buffer_free(xch, local);
 
  out:
-    free(local);
     return ret;
 }
 
@@ -155,12 +150,13 @@ int xc_vcpu_getaffinity(xc_interface *xc
                         uint64_t *cpumap, int cpusize)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
     int ret = -1;
-    uint8_t * local = malloc(cpusize);
 
+    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
     if(local == NULL)
     {
-        PERROR("Could not alloc memory for Xen hypercall");
+        PERROR("Could not allocate memory for getvcpuaffinity domctl hypercall");
         goto out;
     }
 
@@ -168,22 +164,15 @@ int xc_vcpu_getaffinity(xc_interface *xc
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+    xc_set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
     domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(xch, local, sizeof(local)) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out;
-    }
 
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(xch, local, sizeof (local));
     bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+    xc_hypercall_buffer_free(xch, local);
 out:
-    free(local);
     return ret;
 }
 
@@ -283,20 +272,19 @@ int xc_domain_hvm_getcontext(xc_interfac
 {
     int ret;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
+        return -1;
 
     domctl.cmd = XEN_DOMCTL_gethvmcontext;
     domctl.domain = (domid_t)domid;
     domctl.u.hvmcontext.size = size;
-    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
-
-    if ( ctxt_buf ) 
-        if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
-            return ret;
+    xc_set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
 
     ret = do_domctl(xch, &domctl);
 
-    if ( ctxt_buf ) 
-        unlock_pages(xch, ctxt_buf, size);
+    xc_hypercall_bounce_post(xch, ctxt_buf);
 
     return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
 }
@@ -312,23 +300,21 @@ int xc_domain_hvm_getcontext_partial(xc_
 {
     int ret;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
 
-    if ( !ctxt_buf ) 
-        return -EINVAL;
+    if ( !ctxt_buf || xc_hypercall_bounce_pre(xch, ctxt_buf) )
+        return -1;
 
     domctl.cmd = XEN_DOMCTL_gethvmcontext_partial;
     domctl.domain = (domid_t) domid;
     domctl.u.hvmcontext_partial.type = typecode;
     domctl.u.hvmcontext_partial.instance = instance;
-    set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);
+    xc_set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);
 
-    if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
-        return ret;
-    
     ret = do_domctl(xch, &domctl);
 
-    if ( ctxt_buf ) 
-        unlock_pages(xch, ctxt_buf, size);
+    if ( ctxt_buf )
+        xc_hypercall_bounce_post(xch, ctxt_buf);
 
     return ret ? -1 : 0;
 }
@@ -341,18 +327,19 @@ int xc_domain_hvm_setcontext(xc_interfac
 {
     int ret;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
+        return -1;
 
     domctl.cmd = XEN_DOMCTL_sethvmcontext;
     domctl.domain = domid;
     domctl.u.hvmcontext.size = size;
-    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
-
-    if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
-        return ret;
+    xc_set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
 
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(xch, ctxt_buf, size);
+    xc_hypercall_bounce_post(xch, ctxt_buf);
 
     return ret;
 }
@@ -364,18 +351,19 @@ int xc_vcpu_getcontext(xc_interface *xch
 {
     int rc;
     DECLARE_DOMCTL;
-    size_t sz = sizeof(vcpu_guest_context_any_t);
+    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, ctxt) )
+        return -1;
 
     domctl.cmd = XEN_DOMCTL_getvcpucontext;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpucontext.vcpu   = (uint16_t)vcpu;
-    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);
+    xc_set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
 
-    
-    if ( (rc = lock_pages(xch, ctxt, sz)) != 0 )
-        return rc;
     rc = do_domctl(xch, &domctl);
-    unlock_pages(xch, ctxt, sz);
+
+    xc_hypercall_bounce_post(xch, ctxt);
 
     return rc;
 }
@@ -558,22 +546,24 @@ int xc_domain_get_tsc_info(xc_interface 
 {
     int rc;
     DECLARE_DOMCTL;
-    xen_guest_tsc_info_t info = { 0 };
+    DECLARE_HYPERCALL_BUFFER(xen_guest_tsc_info_t, info);
+
+    info = xc_hypercall_buffer_alloc(xch, info, sizeof(*info));
+    if ( info == NULL )
+        return -ENOMEM;
 
     domctl.cmd = XEN_DOMCTL_gettscinfo;
     domctl.domain = (domid_t)domid;
-    set_xen_guest_handle(domctl.u.tsc_info.out_info, &info);
-    if ( (rc = lock_pages(xch, &info, sizeof(info))) != 0 )
-        return rc;
+    xc_set_xen_guest_handle(domctl.u.tsc_info.out_info, info);
     rc = do_domctl(xch, &domctl);
     if ( rc == 0 )
     {
-        *tsc_mode = info.tsc_mode;
-        *elapsed_nsec = info.elapsed_nsec;
-        *gtsc_khz = info.gtsc_khz;
-        *incarnation = info.incarnation;
+        *tsc_mode = info->tsc_mode;
+        *elapsed_nsec = info->elapsed_nsec;
+        *gtsc_khz = info->gtsc_khz;
+        *incarnation = info->incarnation;
     }
-    unlock_pages(xch, &info,sizeof(info));
+    xc_hypercall_buffer_free(xch, info);
     return rc;
 }
 
@@ -957,8 +947,8 @@ int xc_vcpu_setcontext(xc_interface *xch
                        vcpu_guest_context_any_t *ctxt)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_IN);
     int rc;
-    size_t sz = sizeof(vcpu_guest_context_any_t);
 
     if (ctxt == NULL)
     {
@@ -966,16 +956,17 @@ int xc_vcpu_setcontext(xc_interface *xch
         return -1;
     }
 
+    if ( xc_hypercall_bounce_pre(xch, ctxt) )
+        return -1;
+
     domctl.cmd = XEN_DOMCTL_setvcpucontext;
     domctl.domain = domid;
     domctl.u.vcpucontext.vcpu = vcpu;
-    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);
+    xc_set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
 
-    if ( (rc = lock_pages(xch, ctxt, sz)) != 0 )
-        return rc;
     rc = do_domctl(xch, &domctl);
-    
-    unlock_pages(xch, ctxt, sz);
+
+    xc_hypercall_bounce_post(xch, ctxt);
 
     return rc;
 }
@@ -1101,6 +1092,13 @@ int xc_get_device_group(
 {
     int rc;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(sdev_array, max_sdevs * sizeof(*sdev_array), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, sdev_array) )
+    {
+        PERROR("Could not bounce buffer for xc_get_device_group");
+        return -1;
+    }
 
     domctl.cmd = XEN_DOMCTL_get_device_group;
     domctl.domain = (domid_t)domid;
@@ -1108,17 +1106,14 @@ int xc_get_device_group(
     domctl.u.get_device_group.machine_bdf = machine_bdf;
     domctl.u.get_device_group.max_sdevs = max_sdevs;
 
-    set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);
+    xc_set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);
 
-    if ( lock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 )
-    {
-        PERROR("Could not lock memory for xc_get_device_group");
-        return -ENOMEM;
-    }
     rc = do_domctl(xch, &domctl);
-    unlock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array));
 
     *num_sdevs = domctl.u.get_device_group.num_sdevs;
+
+    xc_hypercall_bounce_post(xch, sdev_array);
+
     return rc;
 }
 
diff -r f262590f9b94 -r 6766a5b07735 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Thu Oct 21 09:37:34 2010 +0100
+++ b/tools/libxc/xc_private.c  Thu Oct 21 09:37:34 2010 +0100
@@ -322,12 +322,18 @@ int xc_get_pfn_type_batch(xc_interface *
 int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
                           unsigned int num, xen_pfn_t *arr)
 {
+    int rc;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(arr, sizeof(*arr) * num, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    if ( xc_hypercall_bounce_pre(xch, arr) )
+        return -1;
     domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
     domctl.domain = (domid_t)dom;
     domctl.u.getpageframeinfo3.num = num;
-    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
-    return do_domctl(xch, &domctl);
+    xc_set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
+    rc = do_domctl(xch, &domctl);
+    xc_hypercall_bounce_post(xch, arr);
+    return rc;
 }
 
 int xc_mmuext_op(
@@ -498,25 +504,27 @@ int xc_get_pfn_list(xc_interface *xch,
                     unsigned long max_pfns)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(pfn_buf, max_pfns * sizeof(*pfn_buf), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
     int ret;
-    domctl.cmd = XEN_DOMCTL_getmemlist;
-    domctl.domain   = (domid_t)domid;
-    domctl.u.getmemlist.max_pfns = max_pfns;
-    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
 
 #ifdef VALGRIND
     memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf));
 #endif
 
-    if ( lock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
+    if ( xc_hypercall_bounce_pre(xch, pfn_buf) )
     {
-        PERROR("xc_get_pfn_list: pfn_buf lock failed");
+        PERROR("xc_get_pfn_list: pfn_buf bounce failed");
         return -1;
     }
 
+    domctl.cmd = XEN_DOMCTL_getmemlist;
+    domctl.domain   = (domid_t)domid;
+    domctl.u.getmemlist.max_pfns = max_pfns;
+    xc_set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
+
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf));
+    xc_hypercall_bounce_post(xch, pfn_buf);
 
     return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
 }
diff -r f262590f9b94 -r 6766a5b07735 tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Thu Oct 21 09:37:34 2010 +0100
+++ b/tools/libxc/xc_private.h  Thu Oct 21 09:37:34 2010 +0100
@@ -211,17 +211,18 @@ static inline int do_domctl(xc_interface
 {
     int ret = -1;
     DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BOUNCE(domctl, sizeof(*domctl), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
 
-    if ( hcall_buf_prep(xch, (void **)&domctl, sizeof(*domctl)) != 0 )
+    domctl->interface_version = XEN_DOMCTL_INTERFACE_VERSION;
+
+    if ( xc_hypercall_bounce_pre(xch, domctl) )
     {
-        PERROR("Could not lock memory for Xen hypercall");
+        PERROR("Could not bounce buffer for domctl hypercall");
         goto out1;
     }
 
-    domctl->interface_version = XEN_DOMCTL_INTERFACE_VERSION;
-
     hypercall.op     = __HYPERVISOR_domctl;
-    hypercall.arg[0] = (unsigned long)domctl;
+    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(domctl);
 
     if ( (ret = do_xen_hypercall(xch, &hypercall)) < 0 )
     {
@@ -230,8 +231,7 @@ static inline int do_domctl(xc_interface
                     " rebuild the user-space tool set?\n");
     }
 
-    hcall_buf_release(xch, (void **)&domctl, sizeof(*domctl));
-
+    xc_hypercall_bounce_post(xch, domctl);
  out1:
     return ret;
 }
diff -r f262590f9b94 -r 6766a5b07735 tools/libxc/xc_resume.c
--- a/tools/libxc/xc_resume.c   Thu Oct 21 09:37:34 2010 +0100
+++ b/tools/libxc/xc_resume.c   Thu Oct 21 09:37:34 2010 +0100
@@ -196,12 +196,6 @@ static int xc_domain_resume_any(xc_inter
         goto out;
     }
 
-    if ( lock_pages(xch, &ctxt, sizeof(ctxt)) )
-    {
-        ERROR("Unable to lock ctxt");
-        goto out;
-    }
-
     if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
     {
         ERROR("Could not get vcpu context");
@@ -235,7 +229,6 @@ static int xc_domain_resume_any(xc_inter
 
 #if defined(__i386__) || defined(__x86_64__)
  out:
-    unlock_pages(xch, (void *)&ctxt, sizeof ctxt);
     if (p2m)
         munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
     if (p2m_frame_list)

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel