
To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-3.2-testing] save/restore: Use page-aligned allocations for hypercall args that are
From: "Xen patchbot-3.2-testing" <patchbot-3.2-testing@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 09 Apr 2008 09:10:59 -0700
Delivery-date: Wed, 09 Apr 2008 09:11:57 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1207756565 -3600
# Node ID 26bf346f2a830afa320812bc318a2cfac8108fc4
# Parent  f00c9c09759cac3316c91a62df064f8dea50151b
save/restore: Use page-aligned allocations for hypercall args that are
mlock()ed across other hypercall invocations, to avoid aliasing with
other hypercall arguments, causing spurious unlocking.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
xen-unstable changeset:   17424:f410fa7f379c3a2fc8d4117188f36899e63e6053
xen-unstable date:        Wed Apr 09 16:31:16 2008 +0100
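
For readers unfamiliar with the underlying hazard: mlock() and munlock() operate on
whole pages, not byte ranges, and locks do not stack, so munlock()ing a range unpins
every page it touches. If two independently malloc()ed hypercall buffers happen to
share a page, unlocking one silently unpins part of the other -- the "spurious
unlocking" described above. The sketch below illustrates the failure mode and the fix
in isolation; the buffer names and sizes are illustrative only and do not appear in
the patch.

  #include <stdlib.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
      long page = sysconf(_SC_PAGESIZE);

      /* Two small heap buffers will typically land in the same page. */
      char *arg_a = malloc(64);
      char *arg_b = malloc(64);
      if ( !arg_a || !arg_b )
          return 1;

      mlock(arg_a, 64);
      mlock(arg_b, 64);

      /* munlock() unpins every page touched by the range.  If arg_a and
       * arg_b share a page, arg_b is now unlocked too, even though its
       * owner believes it is still pinned for an in-flight hypercall. */
      munlock(arg_a, 64);

      /* Page-aligned, page-rounded allocations (what xg_memalign() plus
       * ROUNDUP(..., PAGE_SHIFT) provide in the patch) give each buffer
       * exclusive ownership of its pages, so unlocking one cannot alias
       * another. */
      void *arg_c;
      if ( posix_memalign(&arg_c, page, page) != 0 )
          return 1;
      mlock(arg_c, page);
      munlock(arg_c, page);

      free(arg_a); free(arg_b); free(arg_c);
      return 0;
  }

Note also that, unlike the calloc() calls they replace, the aligned allocations return
uninitialized memory, which is why the patch adds explicit memset() calls once the
allocations are known to have succeeded.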
---
 tools/libxc/xc_domain_restore.c |   28 ++++++++++++++++++----------
 tools/libxc/xc_domain_save.c    |   12 +++++++-----
 tools/libxc/xg_private.h        |   18 ++++++++++++++++++
 3 files changed, 43 insertions(+), 15 deletions(-)

diff -r f00c9c09759c -r 26bf346f2a83 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c   Wed Apr 09 16:45:46 2008 +0100
+++ b/tools/libxc/xc_domain_restore.c   Wed Apr 09 16:56:05 2008 +0100
@@ -284,7 +284,8 @@ int xc_domain_restore(int xc_handle, int
     /* The new domain's shared-info frame number. */
     unsigned long shared_info_frame;
     unsigned char shared_info_page[PAGE_SIZE]; /* saved contents from file */
-    shared_info_either_t *old_shared_info = (shared_info_either_t *)shared_info_page;
+    shared_info_either_t *old_shared_info = 
+        (shared_info_either_t *)shared_info_page;
     shared_info_either_t *new_shared_info;
 
     /* A copy of the CPU context of the guest. */
@@ -349,13 +350,6 @@ int xc_domain_restore(int xc_handle, int
     guest_width = sizeof(unsigned long);
     pt_levels = (guest_width == 8) ? 4 : (pt_levels == 2) ? 2 : 3; 
     
-    if ( lock_pages(&ctxt, sizeof(ctxt)) )
-    {
-        /* needed for build domctl, but might as well do early */
-        ERROR("Unable to lock ctxt");
-        return 1;
-    }
-
     if ( !hvm ) 
     {
         /* Load the p2m frame list, plus potential extended info chunk */
@@ -380,8 +374,11 @@ int xc_domain_restore(int xc_handle, int
     /* We want zeroed memory so use calloc rather than malloc. */
     p2m        = calloc(p2m_size, MAX(guest_width, sizeof (xen_pfn_t))); 
     pfn_type   = calloc(p2m_size, sizeof(unsigned long));
-    region_mfn = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
-    p2m_batch  = calloc(MAX_BATCH_SIZE, sizeof(xen_pfn_t));
+
+    region_mfn = xg_memalign(PAGE_SIZE, ROUNDUP(
+                              MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT));
+    p2m_batch  = xg_memalign(PAGE_SIZE, ROUNDUP(
+                              MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT));
 
     if ( (p2m == NULL) || (pfn_type == NULL) ||
          (region_mfn == NULL) || (p2m_batch == NULL) )
@@ -390,6 +387,11 @@ int xc_domain_restore(int xc_handle, int
         errno = ENOMEM;
         goto out;
     }
+
+    memset(region_mfn, 0,
+           ROUNDUP(MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT)); 
+    memset(p2m_batch, 0,
+           ROUNDUP(MAX_BATCH_SIZE * sizeof(xen_pfn_t), PAGE_SHIFT)); 
 
     if ( lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE) )
     {
@@ -974,6 +976,12 @@ int xc_domain_restore(int xc_handle, int
             else
                 DPRINTF("Decreased reservation by %d pages\n", count);
         }
+    }
+
+    if ( lock_pages(&ctxt, sizeof(ctxt)) )
+    {
+        ERROR("Unable to lock ctxt");
+        return 1;
     }
 
     for ( i = 0; i <= max_vcpu_id; i++ )
diff -r f00c9c09759c -r 26bf346f2a83 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c      Wed Apr 09 16:45:46 2008 +0100
+++ b/tools/libxc/xc_domain_save.c      Wed Apr 09 16:56:05 2008 +0100
@@ -939,9 +939,9 @@ int xc_domain_save(int xc_handle, int io
     sent_last_iter = p2m_size;
 
     /* Setup to_send / to_fix and to_skip bitmaps */
-    to_send = malloc(BITMAP_SIZE);
+    to_send = xg_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
     to_fix  = calloc(1, BITMAP_SIZE);
-    to_skip = malloc(BITMAP_SIZE);
+    to_skip = xg_memalign(PAGE_SIZE, ROUNDUP(BITMAP_SIZE, PAGE_SHIFT)); 
 
     if ( !to_send || !to_fix || !to_skip )
     {
@@ -983,8 +983,8 @@ int xc_domain_save(int xc_handle, int io
 
     analysis_phase(xc_handle, dom, p2m_size, to_skip, 0);
 
-    /* We want zeroed memory so use calloc rather than malloc. */
-    pfn_type   = calloc(MAX_BATCH_SIZE, sizeof(*pfn_type));
+    pfn_type   = xg_memalign(PAGE_SIZE, ROUNDUP(
+                              MAX_BATCH_SIZE * sizeof(*pfn_type), PAGE_SHIFT));
     pfn_batch  = calloc(MAX_BATCH_SIZE, sizeof(*pfn_batch));
     if ( (pfn_type == NULL) || (pfn_batch == NULL) )
     {
@@ -992,10 +992,12 @@ int xc_domain_save(int xc_handle, int io
         errno = ENOMEM;
         goto out;
     }
+    memset(pfn_type, 0,
+           ROUNDUP(MAX_BATCH_SIZE * sizeof(*pfn_type), PAGE_SHIFT));
 
     if ( lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type)) )
     {
-        ERROR("Unable to lock");
+        ERROR("Unable to lock pfn_type array");
         goto out;
     }
 
diff -r f00c9c09759c -r 26bf346f2a83 tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h  Wed Apr 09 16:45:46 2008 +0100
+++ b/tools/libxc/xg_private.h  Wed Apr 09 16:56:05 2008 +0100
@@ -7,6 +7,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <malloc.h>
 #include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -176,4 +177,21 @@ int pin_table(int xc_handle, unsigned in
 int pin_table(int xc_handle, unsigned int type, unsigned long mfn,
               domid_t dom);
 
+/* Grrr portability */
+static inline void *xg_memalign(size_t alignment, size_t size)
+{
+#if (_POSIX_C_SOURCE - 0) >= 200112L || (_XOPEN_SOURCE - 0) >= 600
+    int ret;
+    void *ptr;
+    ret = posix_memalign(&ptr, alignment, size);
+    if (ret != 0)
+        return NULL;
+    return ptr;
+#elif defined(_BSD)
+    return valloc(size);
+#else
+    return memalign(alignment, size);
+#endif
+}
+
 #endif /* XG_PRIVATE_H */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
