To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 05 of 18] libxc: add wrappers for XENMEM {increase, decrease}_reservation and populate_physmap
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Tue, 12 Oct 2010 15:16:23 +0100
Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
In-reply-to: <patchbomb.1286892978@xxxxxxxxxxxxxxxxxxxxx>
# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1286892402 -3600
# Node ID 6834151bfad74e84e201062d4e8f3ae58155cd43
# Parent  15c4f1cde006e6d8309eff86a99b609c4c1f090a
libxc: add wrappers for XENMEM {increase,decrease}_reservation and populate_physmap

Currently the wrappers for these hypercalls swallow partial success
and return failure to the caller.

In order to use these functions more widely, instead of open-coding uses of
XENMEM_* and xc_memory_op, add variants which return the actual hypercall
result.

Therefore add the following functions:
  xc_domain_increase_reservation
  xc_domain_decrease_reservation
  xc_domain_populate_physmap
and implement the existing semantics using these new functions as
  xc_domain_increase_reservation_exact
  xc_domain_decrease_reservation_exact
  xc_domain_populate_physmap_exact
replacing the existing xc_domain_memory_* functions.

Use these new functions to replace all open-coded uses of
XENMEM_increase_reservation, XENMEM_decrease_reservation and
XENMEM_populate_physmap.
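
(For illustration only; this sketch is not part of the patch, and xch, dom,
nr_pages and pfns stand for whatever handle, domid and pfn array the caller
already has.)  A caller that can cope with partial allocations uses the plain
wrapper and inspects its return value, while a caller that needs every extent
keeps the old all-or-nothing behaviour via the _exact variant:

    /* Sketch: populate nr_pages 4kB pages, tolerating partial success.
     * xc_domain_populate_physmap() returns the raw hypercall result,
     * i.e. the number of extents actually populated. */
    int done = xc_domain_populate_physmap(xch, dom, nr_pages, 0, 0, pfns);
    if ( done < 0 )
        PERROR("populate_physmap failed");
    else if ( done < nr_pages )
        /* retry or fall back for the remaining nr_pages - done extents */ ;

    /* When anything short of full success is an error, use the _exact
     * variant, which returns 0 only if every extent was populated. */
    if ( xc_domain_populate_physmap_exact(xch, dom, nr_pages, 0, 0, pfns) != 0 )
        PERROR("Could not populate physmap");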

Also rename xc_domain_memory_*_pod_target to xc_domain_*_pod_target
for consistency.

Temporarily add a compatibility macro for
xc_domain_memory_populate_physmap to allow time for qemu to catch up.

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxc/ia64/xc_ia64_hvm_build.c
--- a/tools/libxc/ia64/xc_ia64_hvm_build.c      Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/ia64/xc_ia64_hvm_build.c      Tue Oct 12 15:06:42 2010 +0100
@@ -903,7 +903,7 @@ xc_ia64_setup_shared_info(xc_interface *
  * In this function, we will allocate memory and build P2M/M2P table for VTI
  * guest.  Frist, a pfn list will be initialized discontiguous, normal memory
  * begins with 0, GFW memory and other five pages at their place defined in
- * xen/include/public/arch-ia64.h xc_domain_memory_populate_physmap() called
+ * xen/include/public/arch-ia64.h xc_domain_populate_physmap_exact() called
  * five times, to set parameter 'extent_order' to different value, this is
  * convenient to allocate discontiguous memory with different size.
  */
@@ -966,7 +966,7 @@ setup_guest(xc_interface *xch, uint32_t 
          pfn++)
         pfn_list[i++] = pfn;
 
-    rc = xc_domain_memory_populate_physmap(xch, dom, nr_pages, 0, 0,
+    rc = xc_domain_populate_physmap_exact(xch, dom, nr_pages, 0, 0,
                                            &pfn_list[0]);
     if (rc != 0) {
         PERROR("Could not allocate normal memory for Vti guest.");
@@ -979,7 +979,7 @@ setup_guest(xc_interface *xch, uint32_t 
     for (i = 0; i < GFW_PAGES; i++) 
         pfn_list[i] = (GFW_START >> PAGE_SHIFT) + i;
 
-    rc = xc_domain_memory_populate_physmap(xch, dom, GFW_PAGES,
+    rc = xc_domain_populate_physmap_exact(xch, dom, GFW_PAGES,
                                            0, 0, &pfn_list[0]);
     if (rc != 0) {
         PERROR("Could not allocate GFW memory for Vti guest.");
@@ -995,7 +995,7 @@ setup_guest(xc_interface *xch, uint32_t 
     pfn_list[nr_special_pages] = memmap_info_pfn;
     nr_special_pages++;
 
-    rc = xc_domain_memory_populate_physmap(xch, dom, nr_special_pages,
+    rc = xc_domain_populate_physmap_exact(xch, dom, nr_special_pages,
                                            0, 0, &pfn_list[0]);
     if (rc != 0) {
         PERROR("Could not allocate IO page or store page or buffer io page.");
diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxc/ia64/xc_ia64_linux_restore.c
--- a/tools/libxc/ia64/xc_ia64_linux_restore.c  Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/ia64/xc_ia64_linux_restore.c  Tue Oct 12 15:06:42 2010 +0100
@@ -49,7 +49,7 @@ populate_page_if_necessary(xc_interface 
     if (xc_ia64_p2m_present(p2m_table, gmfn))
         return 0;
 
-    return xc_domain_memory_populate_physmap(xch, dom, 1, 0, 0, &gmfn);
+    return xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &gmfn);
 }
 
 static int
@@ -112,7 +112,7 @@ xc_ia64_recv_unallocated_list(xc_interfa
         }
     }
     if (nr_frees > 0) {
-        if (xc_domain_memory_decrease_reservation(xch, dom, nr_frees,
+        if (xc_domain_decrease_reservation_exact(xch, dom, nr_frees,
                                                   0, pfntab) < 0) {
             PERROR("Could not decrease reservation");
             goto out;
@@ -546,7 +546,7 @@ xc_ia64_hvm_domain_setup(xc_interface *x
     };
     unsigned long nr_pages = sizeof(pfn_list) / sizeof(pfn_list[0]);
 
-    rc = xc_domain_memory_populate_physmap(xch, dom, nr_pages,
+    rc = xc_domain_populate_physmap_exact(xch, dom, nr_pages,
                                            0, 0, &pfn_list[0]);
     if (rc != 0)
         PERROR("Could not allocate IO page or buffer io page.");
diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxc/xc_dom_ia64.c
--- a/tools/libxc/xc_dom_ia64.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_dom_ia64.c Tue Oct 12 15:06:42 2010 +0100
@@ -186,7 +186,7 @@ int arch_setup_meminit(struct xc_dom_ima
         dom->p2m_host[pfn] = start + pfn;
 
     /* allocate guest memory */
-    rc = xc_domain_memory_populate_physmap(dom->xch, dom->guest_domid,
+    rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
                                            nbr, 0, 0,
                                            dom->p2m_host);
     return rc;
diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxc/xc_dom_x86.c
--- a/tools/libxc/xc_dom_x86.c  Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_dom_x86.c  Tue Oct 12 15:06:42 2010 +0100
@@ -733,7 +733,7 @@ int arch_setup_meminit(struct xc_dom_ima
         DOMPRINTF("Populating memory with %d superpages", count);
         for ( pfn = 0; pfn < count; pfn++ )
             extents[pfn] = pfn << SUPERPAGE_PFN_SHIFT;
-        rc = xc_domain_memory_populate_physmap(dom->xch, dom->guest_domid,
+        rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
                                                count, SUPERPAGE_PFN_SHIFT, 0,
                                                extents);
         if ( rc )
@@ -762,7 +762,7 @@ int arch_setup_meminit(struct xc_dom_ima
             allocsz = dom->total_pages - i;
             if ( allocsz > 1024*1024 )
                 allocsz = 1024*1024;
-            rc = xc_domain_memory_populate_physmap(
+            rc = xc_domain_populate_physmap_exact(
                 dom->xch, dom->guest_domid, allocsz,
                 0, 0, &dom->p2m_host[i]);
         }
diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_domain.c   Tue Oct 12 15:06:42 2010 +0100
@@ -579,12 +579,12 @@ int xc_domain_get_tsc_info(xc_interface 
 }
 
 
-int xc_domain_memory_increase_reservation(xc_interface *xch,
-                                          uint32_t domid,
-                                          unsigned long nr_extents,
-                                          unsigned int extent_order,
-                                          unsigned int mem_flags,
-                                          xen_pfn_t *extent_start)
+int xc_domain_increase_reservation(xc_interface *xch,
+                                   uint32_t domid,
+                                   unsigned long nr_extents,
+                                   unsigned int extent_order,
+                                   unsigned int mem_flags,
+                                   xen_pfn_t *extent_start)
 {
     int err;
     struct xen_memory_reservation reservation = {
@@ -598,6 +598,22 @@ int xc_domain_memory_increase_reservatio
     set_xen_guest_handle(reservation.extent_start, extent_start);
 
     err = xc_memory_op(xch, XENMEM_increase_reservation, &reservation);
+
+    return err;
+}
+
+int xc_domain_increase_reservation_exact(xc_interface *xch,
+                                         uint32_t domid,
+                                         unsigned long nr_extents,
+                                         unsigned int extent_order,
+                                         unsigned int mem_flags,
+                                         xen_pfn_t *extent_start)
+{
+    int err;
+
+    err = xc_domain_increase_reservation(xch, domid, nr_extents,
+                                         extent_order, mem_flags, extent_start);
+
     if ( err == nr_extents )
         return 0;
 
@@ -613,11 +629,11 @@ int xc_domain_memory_increase_reservatio
     return err;
 }
 
-int xc_domain_memory_decrease_reservation(xc_interface *xch,
-                                          uint32_t domid,
-                                          unsigned long nr_extents,
-                                          unsigned int extent_order,
-                                          xen_pfn_t *extent_start)
+int xc_domain_decrease_reservation(xc_interface *xch,
+                                   uint32_t domid,
+                                   unsigned long nr_extents,
+                                   unsigned int extent_order,
+                                   xen_pfn_t *extent_start)
 {
     int err;
     struct xen_memory_reservation reservation = {
@@ -637,6 +653,21 @@ int xc_domain_memory_decrease_reservatio
     }
 
     err = xc_memory_op(xch, XENMEM_decrease_reservation, &reservation);
+
+    return err;
+}
+
+int xc_domain_decrease_reservation_exact(xc_interface *xch,
+                                         uint32_t domid,
+                                         unsigned long nr_extents,
+                                         unsigned int extent_order,
+                                         xen_pfn_t *extent_start)
+{
+    int err;
+
+    err = xc_domain_decrease_reservation(xch, domid, nr_extents,
+                                         extent_order, extent_start);
+
     if ( err == nr_extents )
         return 0;
 
@@ -651,12 +682,12 @@ int xc_domain_memory_decrease_reservatio
     return err;
 }
 
-int xc_domain_memory_populate_physmap(xc_interface *xch,
-                                      uint32_t domid,
-                                      unsigned long nr_extents,
-                                      unsigned int extent_order,
-                                      unsigned int mem_flags,
-                                      xen_pfn_t *extent_start)
+int xc_domain_populate_physmap(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long nr_extents,
+                               unsigned int extent_order,
+                               unsigned int mem_flags,
+                               xen_pfn_t *extent_start)
 {
     int err;
     struct xen_memory_reservation reservation = {
@@ -668,6 +699,21 @@ int xc_domain_memory_populate_physmap(xc
     set_xen_guest_handle(reservation.extent_start, extent_start);
 
     err = xc_memory_op(xch, XENMEM_populate_physmap, &reservation);
+
+    return err;
+}
+
+int xc_domain_populate_physmap_exact(xc_interface *xch,
+                                     uint32_t domid,
+                                     unsigned long nr_extents,
+                                     unsigned int extent_order,
+                                     unsigned int mem_flags,
+                                     xen_pfn_t *extent_start)
+{
+    int err;
+
+    err = xc_domain_populate_physmap(xch, domid, nr_extents,
+                                     extent_order, mem_flags, extent_start);
     if ( err == nr_extents )
         return 0;
 
@@ -682,13 +728,13 @@ int xc_domain_memory_populate_physmap(xc
     return err;
 }
 
-static int xc_domain_memory_pod_target(xc_interface *xch,
-                                       int op,
-                                       uint32_t domid,
-                                       uint64_t target_pages,
-                                       uint64_t *tot_pages,
-                                       uint64_t *pod_cache_pages,
-                                       uint64_t *pod_entries)
+static int xc_domain_pod_target(xc_interface *xch,
+                                int op,
+                                uint32_t domid,
+                                uint64_t target_pages,
+                                uint64_t *tot_pages,
+                                uint64_t *pod_cache_pages,
+                                uint64_t *pod_entries)
 {
     int err;
 
@@ -701,7 +747,7 @@ static int xc_domain_memory_pod_target(x
 
     if ( err < 0 )
     {
-        DPRINTF("Failed %s_memory_target dom %d\n",
+        DPRINTF("Failed %s_pod_target dom %d\n",
                 (op==XENMEM_set_pod_target)?"set":"get",
                 domid);
         errno = -err;
@@ -719,37 +765,37 @@ static int xc_domain_memory_pod_target(x
 
     return err;
 }
-                                       
 
-int xc_domain_memory_set_pod_target(xc_interface *xch,
-                                    uint32_t domid,
-                                    uint64_t target_pages,
-                                    uint64_t *tot_pages,
-                                    uint64_t *pod_cache_pages,
-                                    uint64_t *pod_entries)
+
+int xc_domain_set_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t target_pages,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries)
 {
-    return xc_domain_memory_pod_target(xch,
-                                       XENMEM_set_pod_target,
-                                       domid,
-                                       target_pages,
-                                       tot_pages,
-                                       pod_cache_pages,
-                                       pod_entries);
+    return xc_domain_pod_target(xch,
+                                XENMEM_set_pod_target,
+                                domid,
+                                target_pages,
+                                tot_pages,
+                                pod_cache_pages,
+                                pod_entries);
 }
 
-int xc_domain_memory_get_pod_target(xc_interface *xch,
-                                    uint32_t domid,
-                                    uint64_t *tot_pages,
-                                    uint64_t *pod_cache_pages,
-                                    uint64_t *pod_entries)
+int xc_domain_get_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries)
 {
-    return xc_domain_memory_pod_target(xch,
-                                       XENMEM_get_pod_target,
-                                       domid,
-                                       -1,
-                                       tot_pages,
-                                       pod_cache_pages,
-                                       pod_entries);
+    return xc_domain_pod_target(xch,
+                                XENMEM_get_pod_target,
+                                domid,
+                                -1,
+                                tot_pages,
+                                pod_cache_pages,
+                                pod_entries);
 }
 
 int xc_domain_max_vcpus(xc_interface *xch, uint32_t domid, unsigned int max)
diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c   Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_domain_restore.c   Tue Oct 12 15:06:42 2010 +0100
@@ -147,7 +147,7 @@ static int uncanonicalize_pagetable(
 
     /* Allocate the requisite number of mfns. */
     if ( nr_mfns &&
-         (xc_domain_memory_populate_physmap(xch, dom, nr_mfns, 0, 0,
+         (xc_domain_populate_physmap_exact(xch, dom, nr_mfns, 0, 0,
                                             ctx->p2m_batch) != 0) )
     { 
         ERROR("Failed to allocate memory for batch.!\n"); 
@@ -888,7 +888,7 @@ static int apply_batch(xc_interface *xch
 
     /* Now allocate a bunch of mfns for this batch */
     if ( nr_mfns &&
-         (xc_domain_memory_populate_physmap(xch, dom, nr_mfns, 0,
+         (xc_domain_populate_physmap_exact(xch, dom, nr_mfns, 0,
                                             0, ctx->p2m_batch) != 0) )
     { 
         ERROR("Failed to allocate memory for batch.!\n"); 
@@ -1529,15 +1529,7 @@ int xc_domain_restore(xc_interface *xch,
 
         if ( nr_frees > 0 )
         {
-            struct xen_memory_reservation reservation = {
-                .nr_extents   = nr_frees,
-                .extent_order = 0,
-                .domid        = dom
-            };
-            set_xen_guest_handle(reservation.extent_start, tailbuf.u.pv.pfntab);
-
-            if ( (frc = xc_memory_op(xch, XENMEM_decrease_reservation,
-                                     &reservation)) != nr_frees )
+            if ( (frc = xc_domain_decrease_reservation(xch, dom, nr_frees, 0, tailbuf.u.pv.pfntab)) != nr_frees )
             {
                 PERROR("Could not decrease reservation : %d", frc);
                 goto out;
diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c        Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_hvm_build.c        Tue Oct 12 15:06:42 2010 +0100
@@ -203,7 +203,7 @@ static int setup_guest(xc_interface *xch
      * Under 2MB mode, we allocate pages in batches of no more than 8MB to 
      * ensure that we can be preempted and hence dom0 remains responsive.
      */
-    rc = xc_domain_memory_populate_physmap(
+    rc = xc_domain_populate_physmap_exact(
         xch, dom, 0xa0, 0, 0, &page_array[0x00]);
     cur_pages = 0xc0;
     stat_normal_pages = 0xc0;
@@ -233,20 +233,16 @@ static int setup_guest(xc_interface *xch
                               SUPERPAGE_1GB_NR_PFNS << PAGE_SHIFT) )
         {
             long done;
-            xen_pfn_t sp_extents[count >> SUPERPAGE_1GB_SHIFT];
-            struct xen_memory_reservation sp_req = {
-                .nr_extents   = count >> SUPERPAGE_1GB_SHIFT,
-                .extent_order = SUPERPAGE_1GB_SHIFT,
-                .domid        = dom
-            };
+            unsigned long nr_extents = count >> SUPERPAGE_1GB_SHIFT;
+            xen_pfn_t sp_extents[nr_extents];
 
-            if ( pod_mode )
-                sp_req.mem_flags = XENMEMF_populate_on_demand;
+            for ( i = 0; i < nr_extents; i++ )
+                sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_1GB_SHIFT)];
 
-            set_xen_guest_handle(sp_req.extent_start, sp_extents);
-            for ( i = 0; i < sp_req.nr_extents; i++ )
-                sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_1GB_SHIFT)];
-            done = xc_memory_op(xch, XENMEM_populate_physmap, &sp_req);
+            done = xc_domain_populate_physmap(xch, dom, nr_extents, SUPERPAGE_1GB_SHIFT,
+                                              pod_mode ? XENMEMF_populate_on_demand : 0,
+                                              sp_extents);
+
             if ( done > 0 )
             {
                 stat_1gb_pages += done;
@@ -275,20 +271,16 @@ static int setup_guest(xc_interface *xch
             if ( ((count | cur_pages) & (SUPERPAGE_2MB_NR_PFNS - 1)) == 0 )
             {
                 long done;
-                xen_pfn_t sp_extents[count >> SUPERPAGE_2MB_SHIFT];
-                struct xen_memory_reservation sp_req = {
-                    .nr_extents   = count >> SUPERPAGE_2MB_SHIFT,
-                    .extent_order = SUPERPAGE_2MB_SHIFT,
-                    .domid        = dom
-                };
+                unsigned long nr_extents = count >> SUPERPAGE_2MB_SHIFT;
+                xen_pfn_t sp_extents[nr_extents];
 
-                if ( pod_mode )
-                    sp_req.mem_flags = XENMEMF_populate_on_demand;
+                for ( i = 0; i < nr_extents; i++ )
+                    sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_2MB_SHIFT)];
 
-                set_xen_guest_handle(sp_req.extent_start, sp_extents);
-                for ( i = 0; i < sp_req.nr_extents; i++ )
-                    sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_2MB_SHIFT)];
-                done = xc_memory_op(xch, XENMEM_populate_physmap, &sp_req);
+                done = xc_domain_populate_physmap(xch, dom, nr_extents, SUPERPAGE_2MB_SHIFT,
+                                                  pod_mode ? XENMEMF_populate_on_demand : 0,
+                                                  sp_extents);
+
                 if ( done > 0 )
                 {
                     stat_2mb_pages += done;
@@ -302,7 +294,7 @@ static int setup_guest(xc_interface *xch
         /* Fall back to 4kB extents. */
         if ( count != 0 )
         {
-            rc = xc_domain_memory_populate_physmap(
+            rc = xc_domain_populate_physmap_exact(
                 xch, dom, count, 0, 0, &page_array[cur_pages]);
             cur_pages += count;
             stat_normal_pages += count;
@@ -313,10 +305,8 @@ static int setup_guest(xc_interface *xch
      * adjust the PoD cache size so that domain tot_pages will be
      * target_pages - 0x20 after this call. */
     if ( pod_mode )
-        rc = xc_domain_memory_set_pod_target(xch,
-                                             dom,
-                                             target_pages - 0x20,
-                                             NULL, NULL, NULL);
+        rc = xc_domain_set_pod_target(xch, dom, target_pages - 0x20,
+                                      NULL, NULL, NULL);
 
     if ( rc != 0 )
     {
@@ -344,7 +334,7 @@ static int setup_guest(xc_interface *xch
     for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
     {
         xen_pfn_t pfn = special_pfn(i);
-        rc = xc_domain_memory_populate_physmap(xch, dom, 1, 0, 0, &pfn);
+        rc = xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &pfn);
         if ( rc != 0 )
         {
             PERROR("Could not allocate %d'th special page.", i);
diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xc_private.c  Tue Oct 12 15:06:42 2010 +0100
@@ -675,14 +675,14 @@ unsigned long xc_make_page_below_4G(
     xen_pfn_t old_mfn = mfn;
     xen_pfn_t new_mfn;
 
-    if ( xc_domain_memory_decrease_reservation(
+    if ( xc_domain_decrease_reservation_exact(
         xch, domid, 1, 0, &old_mfn) != 0 )
     {
         DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
         return 0;
     }
 
-    if ( xc_domain_memory_increase_reservation(
+    if ( xc_domain_increase_reservation_exact(
         xch, domid, 1, 0, XENMEMF_address_bits(32), &new_mfn) != 0 )
     {
         DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxc/xenctrl.h     Tue Oct 12 15:06:42 2010 +0100
@@ -785,38 +785,62 @@ int xc_domain_get_tsc_info(xc_interface 
 
 int xc_domain_disable_migrate(xc_interface *xch, uint32_t domid);
 
-int xc_domain_memory_increase_reservation(xc_interface *xch,
-                                          uint32_t domid,
-                                          unsigned long nr_extents,
-                                          unsigned int extent_order,
-                                          unsigned int mem_flags,
-                                          xen_pfn_t *extent_start);
+int xc_domain_increase_reservation(xc_interface *xch,
+                                   uint32_t domid,
+                                   unsigned long nr_extents,
+                                   unsigned int extent_order,
+                                   unsigned int mem_flags,
+                                   xen_pfn_t *extent_start);
 
-int xc_domain_memory_decrease_reservation(xc_interface *xch,
-                                          uint32_t domid,
-                                          unsigned long nr_extents,
-                                          unsigned int extent_order,
-                                          xen_pfn_t *extent_start);
+int xc_domain_increase_reservation_exact(xc_interface *xch,
+                                         uint32_t domid,
+                                         unsigned long nr_extents,
+                                         unsigned int extent_order,
+                                         unsigned int mem_flags,
+                                         xen_pfn_t *extent_start);
 
-int xc_domain_memory_populate_physmap(xc_interface *xch,
-                                      uint32_t domid,
-                                      unsigned long nr_extents,
-                                      unsigned int extent_order,
-                                      unsigned int mem_flags,
-                                      xen_pfn_t *extent_start);
+int xc_domain_decrease_reservation(xc_interface *xch,
+                                   uint32_t domid,
+                                   unsigned long nr_extents,
+                                   unsigned int extent_order,
+                                   xen_pfn_t *extent_start);
 
-int xc_domain_memory_set_pod_target(xc_interface *xch,
-                                    uint32_t domid,
-                                    uint64_t target_pages,
-                                    uint64_t *tot_pages,
-                                    uint64_t *pod_cache_pages,
-                                    uint64_t *pod_entries);
+int xc_domain_decrease_reservation_exact(xc_interface *xch,
+                                         uint32_t domid,
+                                         unsigned long nr_extents,
+                                         unsigned int extent_order,
+                                         xen_pfn_t *extent_start);
 
-int xc_domain_memory_get_pod_target(xc_interface *xch,
-                                    uint32_t domid,
-                                    uint64_t *tot_pages,
-                                    uint64_t *pod_cache_pages,
-                                    uint64_t *pod_entries);
+int xc_domain_populate_physmap(xc_interface *xch,
+                               uint32_t domid,
+                               unsigned long nr_extents,
+                               unsigned int extent_order,
+                               unsigned int mem_flags,
+                               xen_pfn_t *extent_start);
+
+int xc_domain_populate_physmap_exact(xc_interface *xch,
+                                     uint32_t domid,
+                                     unsigned long nr_extents,
+                                     unsigned int extent_order,
+                                     unsigned int mem_flags,
+                                     xen_pfn_t *extent_start);
+
+/* Temporary for compatibility */
+#define xc_domain_memory_populate_physmap(x, d, nr, eo, mf, es) \
+    xc_domain_populate_physmap_exact(x, d, nr, eo, mf, es)
+
+int xc_domain_set_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t target_pages,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries);
+
+int xc_domain_get_pod_target(xc_interface *xch,
+                             uint32_t domid,
+                             uint64_t *tot_pages,
+                             uint64_t *pod_cache_pages,
+                             uint64_t *pod_entries);
 
 int xc_domain_ioport_permission(xc_interface *xch,
                                 uint32_t domid,
diff -r 15c4f1cde006 -r 6834151bfad7 tools/libxl/libxl.c
--- a/tools/libxl/libxl.c       Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/libxl/libxl.c       Tue Oct 12 15:06:42 2010 +0100
@@ -2948,11 +2948,11 @@ retry_transaction:
     }
 
     new_target_memkb -= videoram;
-    rc = xc_domain_memory_set_pod_target(ctx->xch, domid,
+    rc = xc_domain_set_pod_target(ctx->xch, domid,
             new_target_memkb / 4, NULL, NULL, NULL);
     if (rc != 0) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR,
-                "xc_domain_memory_set_pod_target domid=%d, memkb=%d "
+                "xc_domain_set_pod_target domid=%d, memkb=%d "
                 "failed rc=%d\n", domid, new_target_memkb / 4,
                 rc);
         abort = 1;
diff -r 15c4f1cde006 -r 6834151bfad7 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Tue Oct 12 15:06:41 2010 +0100
+++ b/tools/python/xen/lowlevel/xc/xc.c Tue Oct 12 15:06:42 2010 +0100
@@ -1635,8 +1635,8 @@ static PyObject *pyxc_domain_set_target_
 
     mem_pages = mem_kb / 4; 
 
-    if (xc_domain_memory_set_pod_target(self->xc_handle, dom, mem_pages,
-                                        NULL, NULL, NULL) != 0)
+    if (xc_domain_set_pod_target(self->xc_handle, dom, mem_pages,
+                                NULL, NULL, NULL) != 0)
         return pyxc_error_to_exception(self->xc_handle);
     
     Py_INCREF(zero);

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
