The balloon driver in the guest frees guest pages and marks them as
MMIO. When the kernel crashes and the crash kernel attempts to read the
oldmem via /proc/vmcore, a read from a ballooned page generates 100%
load in dom0 because Xen asks qemu-dm for the page content. Since the
reads arrive as 8-byte requests, each ballooned page is tried 512 times.

Add a new hvmop, HVMOP_get_mem_type, which returns the hvmmem_type_t for
a given pfn. Pages which are neither RAM nor MMIO are reported as
HVMMEM_mmio_dm. This interface enables the crash kernel to skip
ballooned pages.
Signed-off-by: Olaf Hering <olaf@xxxxxxxxx>
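
For reference, a minimal sketch of how the crash kernel could consume the
new hypercall to skip ballooned pages while serving /proc/vmcore reads.
This assumes a Linux-style HYPERVISOR_hvm_op() wrapper and guest headers
synced with the hunk below; the pfn_is_ram() helper is purely illustrative
and not part of this patch:

    /*
     * Illustrative only: assumes a Linux guest with the public hvm_op.h
     * change below merged into its Xen interface headers.
     */
    #include <xen/interface/xen.h>          /* DOMID_SELF */
    #include <xen/interface/hvm/hvm_op.h>   /* HVMOP_get_mem_type */
    #include <asm/xen/hypercall.h>          /* HYPERVISOR_hvm_op() */

    /* Return 0 only for pages the device model would have to emulate. */
    static int pfn_is_ram(unsigned long pfn)
    {
        struct xen_hvm_get_mem_type a = {
            .domid = DOMID_SELF,
            .pfn   = pfn,
        };

        /* If the hypercall is unavailable or fails, assume RAM and read it. */
        if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
            return 1;

        return a.mem_type != HVMMEM_mmio_dm;
    }

The /proc/vmcore read path can then return zeroes for pages where
pfn_is_ram() is false instead of forwarding 512 8-byte accesses per
ballooned page to qemu-dm.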
diff -r 10f27b8b3d63 xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Mon May 02 12:00:40 2011 +0100
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Tue May 03 21:12:22 2011 +0200
@@ -217,6 +217,7 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
break;
}
+ case HVMOP_get_mem_type:
case HVMOP_set_mem_type:
case HVMOP_set_mem_access:
case HVMOP_get_mem_access:
diff -r 10f27b8b3d63 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Mon May 02 12:00:40 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c Tue May 03 21:12:22 2011 +0200
@@ -3676,6 +3676,37 @@ long do_hvm_op(unsigned long op, XEN_GUE
break;
}
+ case HVMOP_get_mem_type:
+ {
+ struct xen_hvm_get_mem_type a;
+ struct domain *d;
+ p2m_type_t t;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_target_domain_by_id(a.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ rc = -EINVAL;
+ if ( is_hvm_domain(d) )
+ {
+ gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t, 0);
+ if ( p2m_is_mmio(t) )
+ a.mem_type = HVMMEM_mmio_dm;
+ else if ( p2m_is_readonly(t) )
+ a.mem_type = HVMMEM_ram_ro;
+ else if ( p2m_is_ram(t) )
+ a.mem_type = HVMMEM_ram_rw;
+ else
+ a.mem_type = HVMMEM_mmio_dm;
+ rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ }
+ rcu_unlock_domain(d);
+ break;
+ }
+
case HVMOP_set_mem_type:
{
struct xen_hvm_set_mem_type a;
diff -r 10f27b8b3d63 xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h Mon May 02 12:00:40 2011 +0100
+++ b/xen/include/public/hvm/hvm_op.h Tue May 03 21:12:22 2011 +0200
@@ -76,6 +76,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_
/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs 5
+typedef enum {
+ HVMMEM_ram_rw, /* Normal read/write guest RAM */
+ HVMMEM_ram_ro, /* Read-only; writes are discarded */
+ HVMMEM_mmio_dm, /* Reads and writes go to the device model */
+} hvmmem_type_t;
+
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
@@ -109,11 +115,6 @@ typedef struct xen_hvm_modified_memory x
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
#define HVMOP_set_mem_type 8
-typedef enum {
- HVMMEM_ram_rw, /* Normal read/write guest RAM */
- HVMMEM_ram_ro, /* Read-only; writes are discarded */
- HVMMEM_mmio_dm, /* Reads and write go to the device model */
-} hvmmem_type_t;
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
/* Domain to be updated. */
@@ -225,4 +226,18 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_t
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
+#define HVMOP_get_mem_type 15
+/* Return hvmmem_type_t for the specified pfn. */
+struct xen_hvm_get_mem_type {
+ /* Domain to be queried. */
+ domid_t domid;
+ /* OUT variable. */
+ uint16_t mem_type;
+ uint16_t pad[2]; /* align next field on 8-byte boundary */
+ /* IN variable. */
+ uint64_t pfn;
+};
+typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
+
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */