
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] libxc: make do_memory_op's callers responsible for locking indirect buffers
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 20 Oct 2010 06:50:31 -0700
Delivery-date: Wed, 20 Oct 2010 06:54:14 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1287419806 -3600
# Node ID 1449b5e2bad56093e6d48101b28aa99613dd9dac
# Parent  ba91258502025fbfdfd3b2e1deaf0b013474be47
libxc: make do_memory_op's callers responsible for locking indirect buffers

Push responsibility for locking buffers referred to by the memory_op
argument up into the callers (which are now all internal to libxc).

This removes the last of the introspection from do_memory_op and
generally makes the transition to hypercall buffers smoother.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Signed-off-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---
 tools/libxc/xc_domain.c  |   28 ++++++++++++++++-
 tools/libxc/xc_private.c |   77 ++++++++---------------------------------------
 2 files changed, 41 insertions(+), 64 deletions(-)
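
A minimal sketch of the caller-side pattern this change establishes (not
part of the patch itself): the hypothetical wrapper below is modelled on
the callers updated in the diff and uses only the libxc-internal helpers
visible there (lock_pages, unlock_pages, set_xen_guest_handle,
do_memory_op, PERROR); the function name example_reservation_op is
illustrative only.

static int example_reservation_op(xc_interface *xch, uint32_t domid,
                                  unsigned long nr_extents,
                                  xen_pfn_t *extent_start)
{
    int err;
    struct xen_memory_reservation reservation = {
        .nr_extents = nr_extents,
        .domid      = domid
    };

    /* The caller, not do_memory_op, now pins the indirect buffer... */
    if ( lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
    {
        PERROR("Could not lock memory for the example hypercall");
        return -1;
    }

    set_xen_guest_handle(reservation.extent_start, extent_start);

    err = do_memory_op(xch, XENMEM_increase_reservation, &reservation,
                       sizeof(reservation));

    /* ...and unpins it again once the hypercall has completed. */
    unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));

    return err;
}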

diff -r ba9125850202 -r 1449b5e2bad5 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Mon Oct 18 17:35:59 2010 +0100
+++ b/tools/libxc/xc_domain.c   Mon Oct 18 17:36:46 2010 +0100
@@ -599,9 +599,18 @@ int xc_domain_increase_reservation(xc_in
     };
 
     /* may be NULL */
+    if ( extent_start && lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
+    {
+        PERROR("Could not lock memory for XENMEM_increase_reservation 
hypercall");
+        return -1;
+    }
+
     set_xen_guest_handle(reservation.extent_start, extent_start);
 
     err = do_memory_op(xch, XENMEM_increase_reservation, &reservation, sizeof(reservation));
+
+    if ( extent_start )
+        unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));
 
     return err;
 }
@@ -647,7 +656,11 @@ int xc_domain_decrease_reservation(xc_in
         .domid        = domid
     };
 
-    set_xen_guest_handle(reservation.extent_start, extent_start);
+    if ( lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
+    {
+        PERROR("Could not lock memory for XENMEM_decrease_reservation 
hypercall");
+        return -1;
+    }
 
     if ( extent_start == NULL )
     {
@@ -656,7 +669,11 @@ int xc_domain_decrease_reservation(xc_in
         return -1;
     }
 
+    set_xen_guest_handle(reservation.extent_start, extent_start);
+
     err = do_memory_op(xch, XENMEM_decrease_reservation, &reservation, sizeof(reservation));
+
+    unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));
 
     return err;
 }
@@ -715,9 +732,18 @@ int xc_domain_populate_physmap(xc_interf
         .mem_flags    = mem_flags,
         .domid        = domid
     };
+
+    if ( lock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t)) != 0 )
+    {
+        PERROR("Could not lock memory for XENMEM_populate_physmap hypercall");
+        return -1;
+    }
+
     set_xen_guest_handle(reservation.extent_start, extent_start);
 
     err = do_memory_op(xch, XENMEM_populate_physmap, &reservation, sizeof(reservation));
+
+    unlock_pages(xch, extent_start, nr_extents * sizeof(xen_pfn_t));
 
     return err;
 }
diff -r ba9125850202 -r 1449b5e2bad5 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Mon Oct 18 17:35:59 2010 +0100
+++ b/tools/libxc/xc_private.c  Mon Oct 18 17:36:46 2010 +0100
@@ -424,9 +424,6 @@ int do_memory_op(xc_interface *xch, int 
 int do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
 {
     DECLARE_HYPERCALL;
-    struct xen_memory_reservation *reservation = arg;
-    struct xen_machphys_mfn_list *xmml = arg;
-    xen_pfn_t *extent_start;
     long ret = -EINVAL;
 
     hypercall.op     = __HYPERVISOR_memory_op;
@@ -439,68 +436,10 @@ int do_memory_op(xc_interface *xch, int 
         goto out1;
     }
 
-    switch ( cmd )
-    {
-    case XENMEM_increase_reservation:
-    case XENMEM_decrease_reservation:
-    case XENMEM_populate_physmap:
-        get_xen_guest_handle(extent_start, reservation->extent_start);
-        if ( (extent_start != NULL) &&
-             (lock_pages(xch, extent_start,
-                    reservation->nr_extents * sizeof(xen_pfn_t)) != 0) )
-        {
-            PERROR("Could not lock");
-            unlock_pages(xch, reservation, sizeof(*reservation));
-            goto out1;
-        }
-        break;
-    case XENMEM_machphys_mfn_list:
-        get_xen_guest_handle(extent_start, xmml->extent_start);
-        if ( lock_pages(xch, extent_start,
-                   xmml->max_extents * sizeof(xen_pfn_t)) != 0 )
-        {
-            PERROR("Could not lock");
-            unlock_pages(xch, xmml, sizeof(*xmml));
-            goto out1;
-        }
-        break;
-    case XENMEM_add_to_physmap:
-    case XENMEM_current_reservation:
-    case XENMEM_maximum_reservation:
-    case XENMEM_maximum_gpfn:
-    case XENMEM_set_pod_target:
-    case XENMEM_get_pod_target:
-        break;
-    }
-
     ret = do_xen_hypercall(xch, &hypercall);
 
     if ( len )
         unlock_pages(xch, arg, len);
-
-    switch ( cmd )
-    {
-    case XENMEM_increase_reservation:
-    case XENMEM_decrease_reservation:
-    case XENMEM_populate_physmap:
-        get_xen_guest_handle(extent_start, reservation->extent_start);
-        if ( extent_start != NULL )
-            unlock_pages(xch, extent_start,
-                         reservation->nr_extents * sizeof(xen_pfn_t));
-        break;
-    case XENMEM_machphys_mfn_list:
-        get_xen_guest_handle(extent_start, xmml->extent_start);
-        unlock_pages(xch, extent_start,
-                     xmml->max_extents * sizeof(xen_pfn_t));
-        break;
-    case XENMEM_add_to_physmap:
-    case XENMEM_current_reservation:
-    case XENMEM_maximum_reservation:
-    case XENMEM_maximum_gpfn:
-    case XENMEM_set_pod_target:
-    case XENMEM_get_pod_target:
-        break;
-    }
 
  out1:
     return ret;
@@ -534,11 +473,23 @@ int xc_machphys_mfn_list(xc_interface *x
     struct xen_machphys_mfn_list xmml = {
         .max_extents = max_extents,
     };
+
+    if ( lock_pages(xch, extent_start, max_extents * sizeof(xen_pfn_t)) != 0 )
+    {
+        PERROR("Could not lock memory for XENMEM_machphys_mfn_list hypercall");
+        return -1;
+    }
+
     set_xen_guest_handle(xmml.extent_start, extent_start);
     rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
     if (rc || xmml.nr_extents != max_extents)
-        return -1;
-    return 0;
+        rc = -1;
+    else
+        rc = 0;
+
+    unlock_pages(xch, extent_start, max_extents * sizeof(xen_pfn_t));
+
+    return rc;
 }
 
 #ifndef __ia64__

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [xen-unstable] libxc: make do_memory_op's callers responsible for locking indirect buffers, Xen patchbot-unstable <=