On Thu, Nov 23, 2006 at 07:20:29PM +0000, Keir Fraser wrote:
> I'm imminently going to change the interface so that tools always map HVM
> guest pages by pfn rather than mfn. That sidesteps this annoying issue.
How about the patch below? For now I've disabled HVM dumps until this
interface exists, but the rest should be ready to go.
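
For reference, the dump the patch writes is laid out as: core header,
then the VCPU contexts, then the PFN index (PV only), zero padding up
to a page boundary, and finally the guest pages. Roughly, a reader
would do something like the following -- illustrative only, not part
of the patch; read_index() is a made-up helper and it simply assumes
the layout exactly as the code below writes it:

/*
 * Illustrative reader for the PV dump layout produced below (not part
 * of the patch).  Build against libxc for xc_core_header_t/xen_pfn_t.
 */
#include <stdio.h>
#include <xenctrl.h>

static int read_index(FILE *f)
{
    xc_core_header_t hdr;
    unsigned int i;

    if ( fread(&hdr, sizeof(hdr), 1, f) != 1 )
        return -1;
    if ( hdr.xch_magic != XC_CORE_MAGIC )
        return -1;

    /* One xen_pfn_t per dumped page, starting at xch_index_offset. */
    if ( fseek(f, hdr.xch_index_offset, SEEK_SET) != 0 )
        return -1;

    for ( i = 0; i < hdr.xch_nr_pages; i++ )
    {
        xen_pfn_t mfn;
        if ( fread(&mfn, sizeof(mfn), 1, f) != 1 )
            return -1;
        /*
         * An entry of INVALID_P2M_ENTRY (~0UL) means this PFN had no
         * backing MFN; the corresponding page at xch_pages_offset is
         * zero-filled.
         */
    }
    return 0;
}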
# HG changeset patch
# User john.levon@xxxxxxx
# Date 1164314513 28800
# Node ID c2cb4da91cd201f2645b1ead3964bcfecb5eef1a
# Parent 38037855268048b1326b8b86aad6736da88eea95
Use the guest's own p2m table instead of xc_get_pfn_list(), which cannot handle
PFNs with no MFN.
Dump a zeroed page for PFNs with no MFN.
Clearly deprecate xc_get_pfn_list().
Do not include a P2M table with HVM domains.
Refuse to dump HVM until we can map its pages with PFNs.
Signed-off-by: John Levon <john.levon@xxxxxxx>
diff --git a/tools/libxc/xc_core.c b/tools/libxc/xc_core.c
--- a/tools/libxc/xc_core.c
+++ b/tools/libxc/xc_core.c
@@ -21,44 +21,139 @@ copy_from_domain_page(int xc_handle,
return 0;
}
+static int
+map_p2m(int xc_handle, xc_dominfo_t *info, xen_pfn_t **live_p2m,
+ unsigned long *pfnp)
+{
+ /* Double and single indirect references to the live P2M table */
+ xen_pfn_t *live_p2m_frame_list_list = NULL;
+ xen_pfn_t *live_p2m_frame_list = NULL;
+ shared_info_t *live_shinfo = NULL;
+ uint32_t dom = info->domid;
+ unsigned long max_pfn = 0;
+ int ret = -1;
+ int err;
+
+ /* Map the shared info frame */
+ live_shinfo = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ, info->shared_info_frame);
+
+ if ( !live_shinfo )
+ {
+ PERROR("Couldn't map live_shinfo");
+ goto out;
+ }
+
+ max_pfn = live_shinfo->arch.max_pfn;
+
+ if ( max_pfn < info->nr_pages )
+ {
+ ERROR("max_pfn < nr_pages (%lx < %lx)", max_pfn, info->nr_pages);
+ goto out;
+ }
+
+ live_p2m_frame_list_list =
+ xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, PROT_READ,
+ live_shinfo->arch.pfn_to_mfn_frame_list_list);
+
+ if ( !live_p2m_frame_list_list )
+ {
+ PERROR("Couldn't map p2m_frame_list_list (errno %d)", errno);
+ goto out;
+ }
+
+ live_p2m_frame_list =
+ xc_map_foreign_batch(xc_handle, dom, PROT_READ,
+ live_p2m_frame_list_list,
+ P2M_FLL_ENTRIES);
+
+ if ( !live_p2m_frame_list )
+ {
+ PERROR("Couldn't map p2m_frame_list");
+ goto out;
+ }
+
+ *live_p2m = xc_map_foreign_batch(xc_handle, dom, PROT_READ,
+ live_p2m_frame_list,
+ P2M_FL_ENTRIES);
+
+ if ( !*live_p2m )
+ {
+ PERROR("Couldn't map p2m table");
+ goto out;
+ }
+
+ *pfnp = max_pfn;
+
+ ret = 0;
+
+out:
+ err = errno;
+
+ if ( live_shinfo )
+ munmap(live_shinfo, PAGE_SIZE);
+
+ if ( live_p2m_frame_list_list )
+ munmap(live_p2m_frame_list_list, PAGE_SIZE);
+
+ if ( live_p2m_frame_list )
+ munmap(live_p2m_frame_list, P2M_FLL_ENTRIES * PAGE_SIZE);
+
+ errno = err;
+ return ret;
+}
+
int
xc_domain_dumpcore_via_callback(int xc_handle,
uint32_t domid,
void *args,
dumpcore_rtn_t dump_rtn)
{
- unsigned long nr_pages;
- xen_pfn_t *page_array = NULL;
+ unsigned long nr_pages = 0;
+ unsigned long max_pfn = 0;
xc_dominfo_t info;
int i, nr_vcpus = 0;
char *dump_mem, *dump_mem_start = NULL;
struct xc_core_header header;
+ xen_pfn_t *p2m = NULL;
vcpu_guest_context_t ctxt[MAX_VIRT_CPUS];
char dummy[PAGE_SIZE];
int dummy_len;
- int sts;
+ int sts = -1;
if ( (dump_mem_start = malloc(DUMP_INCREMENT*PAGE_SIZE)) == NULL )
{
PERROR("Could not allocate dump_mem");
- goto error_out;
+ goto out;
}
if ( xc_domain_getinfo(xc_handle, domid, 1, &info) != 1 )
{
PERROR("Could not get info for domain");
- goto error_out;
+ goto out;
+ }
+
+ if ( info.hvm )
+ {
+ ERROR("Cannot dump HVM domains");
+ goto out;
}
if ( domid != info.domid )
{
PERROR("Domain %d does not exist", domid);
- goto error_out;
+ goto out;
}
for ( i = 0; i <= info.max_vcpu_id; i++ )
if ( xc_vcpu_getcontext(xc_handle, domid, i, &ctxt[nr_vcpus]) == 0)
nr_vcpus++;
+
+ if ( nr_vcpus == 0 )
+ {
+ PERROR("No VCPU context could be grabbed");
+ goto out;
+ }
nr_pages = info.nr_pages;
@@ -68,60 +163,66 @@ xc_domain_dumpcore_via_callback(int xc_h
header.xch_ctxt_offset = sizeof(struct xc_core_header);
header.xch_index_offset = sizeof(struct xc_core_header) +
sizeof(vcpu_guest_context_t)*nr_vcpus;
- dummy_len = (sizeof(struct xc_core_header) +
- (sizeof(vcpu_guest_context_t) * nr_vcpus) +
- (nr_pages * sizeof(xen_pfn_t)));
+ dummy_len = sizeof(struct xc_core_header) +
+ sizeof(vcpu_guest_context_t) * nr_vcpus;
+ if ( !info.hvm )
+ dummy_len += nr_pages * sizeof(xen_pfn_t);
header.xch_pages_offset = round_pgup(dummy_len);
sts = dump_rtn(args, (char *)&header, sizeof(struct xc_core_header));
if ( sts != 0 )
- goto error_out;
+ goto out;
sts = dump_rtn(args, (char *)&ctxt, sizeof(ctxt[0]) * nr_vcpus);
if ( sts != 0 )
- goto error_out;
-
- if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL )
- {
- IPRINTF("Could not allocate memory\n");
- goto error_out;
- }
- if ( xc_get_pfn_list(xc_handle, domid, page_array, nr_pages) != nr_pages )
- {
- IPRINTF("Could not get the page frame list\n");
- goto error_out;
- }
- sts = dump_rtn(args, (char *)page_array, nr_pages * sizeof(xen_pfn_t));
- if ( sts != 0 )
- goto error_out;
+ goto out;
+
+ if ( !info.hvm )
+ {
+ sts = map_p2m(xc_handle, &info, &p2m, &max_pfn);
+ if ( sts != 0 )
+ goto out;
+
+ sts = dump_rtn(args, (char *)p2m, sizeof(xen_pfn_t) * nr_pages);
+ if ( sts != 0 )
+ goto out;
+ }
/* Pad the output data to page alignment. */
memset(dummy, 0, PAGE_SIZE);
sts = dump_rtn(args, dummy, header.xch_pages_offset - dummy_len);
if ( sts != 0 )
- goto error_out;
+ goto out;
for ( dump_mem = dump_mem_start, i = 0; i < nr_pages; i++ )
{
- copy_from_domain_page(xc_handle, domid, page_array[i], dump_mem);
+ if ( !info.hvm && p2m[i] == -1 )
+ memset(dump_mem, 0, PAGE_SIZE);
+ else
+ copy_from_domain_page(xc_handle, domid,
+ (info.hvm) ? i : p2m[i], dump_mem);
dump_mem += PAGE_SIZE;
if ( ((i + 1) % DUMP_INCREMENT == 0) || ((i + 1) == nr_pages) )
{
sts = dump_rtn(args, dump_mem_start, dump_mem - dump_mem_start);
if ( sts != 0 )
- goto error_out;
+ goto out;
dump_mem = dump_mem_start;
}
}
+ sts = 0;
+
+out:
+ if ( p2m )
+ {
+ if ( info.hvm )
+ free(p2m);
+ else
+ munmap(p2m, P2M_SIZE);
+ }
free(dump_mem_start);
- free(page_array);
- return 0;
-
- error_out:
- free(dump_mem_start);
- free(page_array);
- return -1;
+ return sts;
}
/* Callback args for writing to a local dump file. */
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -104,11 +104,17 @@ int xc_find_device_number(const char *na
*/
typedef struct xc_core_header {
+ /* XC_CORE_MAGIC / XC_CORE_MAGIC_HVM */
unsigned int xch_magic;
+ /* nr of vcpu_guest_contexts at xch_ctxt_offset */
unsigned int xch_nr_vcpus;
+ /* nr of pages at xch_pages_offset, xch_index_offset */
unsigned int xch_nr_pages;
+ /* offset of VCPU contexts */
unsigned int xch_ctxt_offset;
+ /* offset of PFN->MFN array. Empty for HVM guests */
unsigned int xch_index_offset;
+ /* offset of dump pages */
unsigned int xch_pages_offset;
} xc_core_header_t;
@@ -511,6 +517,10 @@ unsigned long xc_translate_foreign_addre
unsigned long xc_translate_foreign_address(int xc_handle, uint32_t dom,
int vcpu, unsigned long long virt);
+/**
+ * DEPRECATED. Avoid using this, as it does not correctly account for PFNs
+ * without a backing MFN.
+ */
int xc_get_pfn_list(int xc_handle, uint32_t domid, xen_pfn_t *pfn_buf,
unsigned long max_pfns);
diff --git a/tools/libxc/xg_private.h b/tools/libxc/xg_private.h
--- a/tools/libxc/xg_private.h
+++ b/tools/libxc/xg_private.h
@@ -119,6 +119,25 @@ typedef unsigned long l4_pgentry_t;
(((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
#endif
+#define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
+
+/* Size in bytes of the P2M (rounded up to the nearest PAGE_SIZE bytes) */
+#define P2M_SIZE ROUNDUP((max_pfn * sizeof(xen_pfn_t)), PAGE_SHIFT)
+
+/* Number of xen_pfn_t in a page */
+#define fpp (PAGE_SIZE/sizeof(xen_pfn_t))
+
+/* Number of entries in the pfn_to_mfn_frame_list_list */
+#define P2M_FLL_ENTRIES (((max_pfn)+(fpp*fpp)-1)/(fpp*fpp))
+
+/* Number of entries in the pfn_to_mfn_frame_list */
+#define P2M_FL_ENTRIES (((max_pfn)+fpp-1)/fpp)
+
+/* Size in bytes of the pfn_to_mfn_frame_list */
+#define P2M_FL_SIZE ((P2M_FL_ENTRIES)*sizeof(unsigned long))
+
+#define INVALID_P2M_ENTRY (~0UL)
+
struct domain_setup_info
{
uint64_t v_start;
diff --git a/tools/libxc/xg_save_restore.h b/tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h
+++ b/tools/libxc/xg_save_restore.h
@@ -73,7 +73,6 @@ static int get_platform_info(int xc_hand
*/
#define PFN_TO_KB(_pfn) ((_pfn) << (PAGE_SHIFT - 10))
-#define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
/*
@@ -86,21 +85,6 @@ static int get_platform_info(int xc_hand
#define M2P_SIZE(_m) ROUNDUP(((_m) * sizeof(xen_pfn_t)), M2P_SHIFT)
#define M2P_CHUNKS(_m) (M2P_SIZE((_m)) >> M2P_SHIFT)
-/* Size in bytes of the P2M (rounded up to the nearest PAGE_SIZE bytes) */
-#define P2M_SIZE ROUNDUP((max_pfn * sizeof(xen_pfn_t)), PAGE_SHIFT)
-
-/* Number of xen_pfn_t in a page */
-#define fpp (PAGE_SIZE/sizeof(xen_pfn_t))
-
-/* Number of entries in the pfn_to_mfn_frame_list */
-#define P2M_FL_ENTRIES (((max_pfn)+fpp-1)/fpp)
-
-/* Size in bytes of the pfn_to_mfn_frame_list */
-#define P2M_FL_SIZE ((P2M_FL_ENTRIES)*sizeof(unsigned long))
-
-/* Number of entries in the pfn_to_mfn_frame_list_list */
-#define P2M_FLL_ENTRIES (((max_pfn)+(fpp*fpp)-1)/(fpp*fpp))
-
/* Current guests allow 8MB 'slack' in their P2M */
#define NR_SLACK_ENTRIES ((8 * 1024 * 1024) / PAGE_SIZE)
@@ -109,8 +93,3 @@ static int get_platform_info(int xc_hand
/* Returns TRUE if the PFN is currently mapped */
#define is_mapped(pfn_type) (!((pfn_type) & 0x80000000UL))
-
-#define INVALID_P2M_ENTRY (~0UL)
-
-
-
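
For what it's worth, dump_rtn is just a write callback; the local-file
path in xc_core.c boils down to something like this (a minimal sketch,
not part of the patch, assuming the (void *args, char *buffer,
unsigned int length) shape implied by the dump_rtn() call sites above):

#include <unistd.h>

/* Append each chunk the dumper hands us to an already-open fd. */
static int dump_to_fd(void *args, char *buffer, unsigned int length)
{
    int fd = *(int *)args;

    while ( length > 0 )
    {
        ssize_t n = write(fd, buffer, length);
        if ( n <= 0 )
            return -1;   /* any non-zero return aborts the dump */
        buffer += n;
        length -= n;
    }
    return 0;
}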