Implement libxc interface to PoD functionality:
* Add xc_hvm_build_target_mem(), which takes both memsize and target.
Memsize is the total memory, allocated as PoD pages and reported in
the e820; target is the size of the PoD cache. If the two are equal,
the normal build path is used. (So the same function can always be
used to build, and it will decide whether or not to use PoD.)
* Add xc_domain_memory_[gs]et_pod_target(), which sets and/or returns
information about the PoD cache and p2m entries.
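
A minimal usage sketch of the new interface (the handle, domid, image
path, and sizes below are illustrative only, not part of this patch):
build a guest with 1024MB of PoD-backed memory and a 256MB cache, then
query the PoD state:

    uint64_t tot_pages, cache_pages, pod_entries;

    /* memsize=1024MB reported in the e820, target=256MB PoD cache. */
    if ( xc_hvm_build_target_mem(xc_handle, domid, 1024, 256,
                                 "/usr/lib/xen/boot/hvmloader") != 0 )
        return -1;

    /* Read back the current PoD accounting for the domain. */
    if ( xc_domain_memory_get_pod_target(xc_handle, domid, &tot_pages,
                                         &cache_pages, &pod_entries) != 0 )
        return -1;

    /* Note: unlike the build call, set_pod_target takes pages, not MB;
     * e.g. after the balloon driver changes the guest's footprint
     * (new_target_pages is a placeholder): */
    xc_domain_memory_set_pod_target(xc_handle, domid, new_target_pages,
                                    NULL, NULL, NULL);
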
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
diff -r 2e9bacf8915c tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c Fri Dec 19 17:54:23 2008 +0000
+++ b/tools/libxc/xc_domain.c Fri Dec 19 17:54:45 2008 +0000
@@ -562,6 +562,76 @@
}
return err;
+}
+
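+/* Common implementation of xc_domain_memory_{set,get}_pod_target(). */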
+static int xc_domain_memory_pod_target(int xc_handle,
+ int op,
+ uint32_t domid,
+ uint64_t target_pages,
+ uint64_t *tot_pages,
+ uint64_t *pod_cache_pages,
+ uint64_t *pod_entries)
+{
+ int err;
+
+ struct xen_pod_target pod_target = {
+ .domid = domid,
+ .target_pages = target_pages
+ };
+
+ err = xc_memory_op(xc_handle, op, &pod_target);
+
+ if ( err < 0 )
+ {
+ DPRINTF("Failed %s_memory_target dom %d\n",
+ (op==XENMEM_set_pod_target)?"set":"get",
+ domid);
+ errno = -err;
+ err = -1;
+ }
+ else
+ err = 0;
+
+ if ( tot_pages )
+ *tot_pages = pod_target.tot_pages;
+ if ( pod_cache_pages )
+ *pod_cache_pages = pod_target.pod_cache_pages;
+ if ( pod_entries )
+ *pod_entries = pod_target.pod_entries;
+
+ return err;
+}
+
+
+int xc_domain_memory_set_pod_target(int xc_handle,
+ uint32_t domid,
+ uint64_t target_pages,
+ uint64_t *tot_pages,
+ uint64_t *pod_cache_pages,
+ uint64_t *pod_entries)
+{
+ return xc_domain_memory_pod_target(xc_handle,
+ XENMEM_set_pod_target,
+ domid,
+ target_pages,
+ tot_pages,
+ pod_cache_pages,
+ pod_entries);
+}
+
+int xc_domain_memory_get_pod_target(int xc_handle,
+ uint32_t domid,
+ uint64_t *tot_pages,
+ uint64_t *pod_cache_pages,
+ uint64_t *pod_entries)
+{
+ return xc_domain_memory_pod_target(xc_handle,
+ XENMEM_get_pod_target,
+ domid,
+ -1,
+ tot_pages,
+ pod_cache_pages,
+ pod_entries);
}
int xc_domain_max_vcpus(int xc_handle, uint32_t domid, unsigned int max)
diff -r 2e9bacf8915c tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c Fri Dec 19 17:54:23 2008 +0000
+++ b/tools/libxc/xc_hvm_build.c Fri Dec 19 17:54:45 2008 +0000
@@ -146,11 +146,13 @@
}
static int setup_guest(int xc_handle,
- uint32_t dom, int memsize,
+ uint32_t dom, int memsize, int target,
char *image, unsigned long image_size)
{
xen_pfn_t *page_array = NULL;
unsigned long i, nr_pages = (unsigned long)memsize << (20 - PAGE_SHIFT);
+ unsigned long target_pages = (unsigned long)target << (20 - PAGE_SHIFT);
+ unsigned long pod_pages = 0;
unsigned long special_page_nr, entry_eip, cur_pages;
struct xen_add_to_physmap xatp;
struct shared_info *shared_info;
@@ -160,10 +162,15 @@
uint64_t v_start, v_end;
int rc;
xen_capabilities_info_t caps;
+ int pod_mode = 0;
+
/* An HVM guest must be initialised with at least 2MB memory. */
- if ( memsize < 2 )
+ if ( memsize < 2 || target < 2 )
goto error_out;
+
+ if ( memsize > target )
+ pod_mode = 1;
if ( elf_init(&elf, image, image_size) != 0 )
goto error_out;
@@ -235,6 +242,10 @@
.extent_order = SUPERPAGE_PFN_SHIFT,
.domid = dom
};
+
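+ /* Allocate the superpage extents as populate-on-demand entries,
+  * not backed by real memory. */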
+ if ( pod_mode )
+ sp_req.mem_flags = XENMEMF_populate_on_demand;
+
set_xen_guest_handle(sp_req.extent_start, sp_extents);
for ( i = 0; i < sp_req.nr_extents; i++ )
sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_PFN_SHIFT)];
@@ -242,6 +253,11 @@
if ( done > 0 )
{
done <<= SUPERPAGE_PFN_SHIFT;
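+ /* Credit the new PoD entries toward the cache size, but only
+  * the portion that falls below target_pages. */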
+ if ( pod_mode && target_pages > cur_pages )
+ {
+ int d = target_pages - cur_pages;
+ pod_pages += ( done < d ) ? done : d;
+ }
cur_pages += done;
count -= done;
}
@@ -253,8 +269,16 @@
rc = xc_domain_memory_populate_physmap(
xc_handle, dom, count, 0, 0, &page_array[cur_pages]);
cur_pages += count;
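+ /* These 4kB extents are populated with real memory, so deduct
+  * them from the PoD cache size we will request below. */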
+ if ( pod_mode )
+ pod_pages -= count;
}
}
+
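+ /* Ask Xen to size the PoD cache to the number of PoD pages
+  * accounted above. */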
+ if ( pod_mode )
+ rc = xc_domain_memory_set_pod_target(xc_handle,
+ dom,
+ pod_pages,
+ NULL, NULL, NULL);
if ( rc != 0 )
{
@@ -354,6 +378,7 @@
static int xc_hvm_build_internal(int xc_handle,
uint32_t domid,
int memsize,
+ int target,
char *image,
unsigned long image_size)
{
@@ -363,7 +388,7 @@
return -1;
}
- return setup_guest(xc_handle, domid, memsize, image, image_size);
+ return setup_guest(xc_handle, domid, memsize, target, image, image_size);
}
static inline int is_loadable_phdr(Elf32_Phdr *phdr)
@@ -388,7 +413,34 @@
((image = xc_read_image(image_name, &image_size)) == NULL) )
return -1;
- sts = xc_hvm_build_internal(xc_handle, domid, memsize, image, image_size);
+ sts = xc_hvm_build_internal(xc_handle, domid, memsize, memsize,
+ image, image_size);
+
+ free(image);
+
+ return sts;
+}
+
+/* xc_hvm_build_target_mem:
+ * Create a domain for a pre-ballooned virtualized Linux, using
+ * files/filenames. Memsize and target are given in MB. If target <
+ * memsize, the domain is created with memsize MB marked
+ * populate-on-demand, backed by a PoD cache of target MB. If target
+ * == memsize, pages are populated normally.
+ */
+int xc_hvm_build_target_mem(int xc_handle,
+ uint32_t domid,
+ int memsize,
+ int target,
+ const char *image_name)
+{
+ char *image;
+ int sts;
+ unsigned long image_size;
+
+ if ( (image_name == NULL) ||
+ ((image = xc_read_image(image_name, &image_size)) == NULL) )
+ return -1;
+
+ sts = xc_hvm_build_internal(xc_handle, domid, memsize, target,
+ image, image_size);
free(image);
@@ -423,7 +475,7 @@
return -1;
}
- sts = xc_hvm_build_internal(xc_handle, domid, memsize,
+ sts = xc_hvm_build_internal(xc_handle, domid, memsize, memsize,
img, img_len);
/* xc_inflate_buffer may return the original buffer pointer (for
diff -r 2e9bacf8915c tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c Fri Dec 19 17:54:23 2008 +0000
+++ b/tools/libxc/xc_private.c Fri Dec 19 17:54:45 2008 +0000
@@ -323,6 +323,14 @@
goto out1;
}
break;
+ case XENMEM_set_pod_target:
+ case XENMEM_get_pod_target:
+ if ( lock_pages(arg, sizeof(struct xen_pod_target)) )
+ {
+ PERROR("Could not lock");
+ goto out1;
+ }
+ break;
}
ret = do_xen_hypercall(xc_handle, &hypercall);
@@ -354,6 +362,10 @@
case XENMEM_maximum_reservation:
case XENMEM_maximum_gpfn:
unlock_pages(arg, sizeof(domid_t));
+ break;
+ case XENMEM_set_pod_target:
+ case XENMEM_get_pod_target:
+ unlock_pages(arg, sizeof(struct xen_pod_target));
break;
}
diff -r 2e9bacf8915c tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Fri Dec 19 17:54:23 2008 +0000
+++ b/tools/libxc/xenctrl.h Fri Dec 19 17:54:45 2008 +0000
@@ -634,6 +634,19 @@
xen_pfn_t *gpfn_list,
xen_pfn_t *mfn_list);
+int xc_domain_memory_set_pod_target(int xc_handle,
+ uint32_t domid,
+ uint64_t target_pages,
+ uint64_t *tot_pages,
+ uint64_t *pod_cache_pages,
+ uint64_t *pod_entries);
+
+int xc_domain_memory_get_pod_target(int xc_handle,
+ uint32_t domid,
+ uint64_t *tot_pages,
+ uint64_t *pod_cache_pages,
+ uint64_t *pod_entries);
+
int xc_domain_ioport_permission(int xc_handle,
uint32_t domid,
uint32_t first_port,
diff -r 2e9bacf8915c tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h Fri Dec 19 17:54:23 2008 +0000
+++ b/tools/libxc/xenguest.h Fri Dec 19 17:54:45 2008 +0000
@@ -130,6 +130,12 @@
int memsize,
const char *image_name);
+int xc_hvm_build_target_mem(int xc_handle,
+ uint32_t domid,
+ int memsize,
+ int target,
+ const char *image_name);
+
int xc_hvm_build_mem(int xc_handle,
uint32_t domid,
int memsize,