stubdom: add live migration support by having ioemu just notify the
hypervisor about memory changes.
The impact on disk performance is small: throughput typically drops from
71.5MB/s to 70.5MB/s during live migration. The impact on network
performance is even smaller, to the point of being hard to measure.
Signed-off-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxxx>
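
For reference, here is a minimal sketch (not part of the patch) of how the
notification is meant to be used from the device model's write path,
mirroring the exec-dm.c change below.  write_guest_ram() is a hypothetical
helper standing in for ioemu's actual RAM copy; xc_hvm_modified_memory,
xc_handle, domid, logdirty_bitmap and the TARGET_PAGE_* macros are the
identifiers the patch introduces or already relies on:

/* Sketch only: after writing len bytes at guest-physical address addr,
 * report every touched page to the hypervisor so the live-migration
 * code re-sends it.  write_guest_ram() is hypothetical. */
static void dm_write_and_notify(target_phys_addr_t addr,
                                const uint8_t *buf, int len)
{
    write_guest_ram(addr, buf, len);      /* the actual RAM write */

    if (logdirty_bitmap != NULL)          /* only while log-dirty is active */
        xc_hvm_modified_memory(xc_handle, domid,
                addr >> TARGET_PAGE_BITS,
                ((addr + len + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
                - (addr >> TARGET_PAGE_BITS));
}
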
diff -r c069dbd814cd tools/ioemu/target-i386-dm/exec-dm.c
--- a/tools/ioemu/target-i386-dm/exec-dm.c Fri Jun 20 18:39:37 2008 +0100
+++ b/tools/ioemu/target-i386-dm/exec-dm.c Fri Jun 27 19:33:30 2008 +0100
@@ -483,9 +483,11 @@
}
#endif
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
- int len, int is_write)
+void cpu_physical_memory_rw(target_phys_addr_t _addr, uint8_t *buf,
+ int _len, int is_write)
{
+ target_phys_addr_t addr = _addr;
+ int len = _len;
int l, io_index;
uint8_t *ptr;
uint32_t val;
@@ -520,6 +522,7 @@
} else if ((ptr = phys_ram_addr(addr)) != NULL) {
/* Writing to RAM */
memcpy_words(ptr, buf, l);
+#ifndef CONFIG_STUBDOM
if (logdirty_bitmap != NULL) {
/* Record that we have dirtied this frame */
unsigned long pfn = addr >> TARGET_PAGE_BITS;
@@ -531,6 +534,7 @@
|= 1UL << pfn % HOST_LONG_BITS;
}
}
+#endif
#ifdef __ia64__
sync_icache(ptr, l);
#endif
@@ -565,6 +569,13 @@
buf += l;
addr += l;
}
+
+#ifdef CONFIG_STUBDOM
+ if (logdirty_bitmap != NULL)
+        xc_hvm_modified_memory(xc_handle, domid, _addr >> TARGET_PAGE_BITS,
+                ((_addr + _len + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
+                - (_addr >> TARGET_PAGE_BITS));
+#endif
mapcache_unlock();
}
diff -r c069dbd814cd tools/ioemu/xenstore.c
--- a/tools/ioemu/xenstore.c Fri Jun 20 18:39:37 2008 +0100
+++ b/tools/ioemu/xenstore.c Fri Jun 27 19:33:30 2008 +0100
@@ -404,6 +404,10 @@
/* No key yet: wait for the next watch */
return;
+#ifdef CONFIG_STUBDOM
+    /* We pass the writes on to the hypervisor */
+ seg = (void*)1;
+#else
strncpy(key_terminated, key_ascii, 16);
free(key_ascii);
key = (key_t) strtoull(key_terminated, NULL, 16);
@@ -419,11 +423,6 @@
fprintf(logfile, "%s: key=%16.16llx size=%lu\n", __FUNCTION__,
(unsigned long long)key, logdirty_bitmap_size);
-#ifdef CONFIG_STUBDOM
- /* XXX we just can't use shm. */
- fprintf(logfile, "Log dirty is not implemented in stub domains!\n");
- return;
-#else
shmid = shmget(key, 2 * logdirty_bitmap_size, S_IRUSR|S_IWUSR);
if (shmid == -1) {
fprintf(logfile, "Log-dirty: shmget failed: segment %16.16llx "
diff -r c069dbd814cd tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c Fri Jun 20 18:39:37 2008 +0100
+++ b/tools/libxc/xc_misc.c Fri Jun 27 19:33:30 2008 +0100
@@ -267,6 +267,34 @@
return rc;
}
+int xc_hvm_modified_memory(
+ int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr)
+{
+ DECLARE_HYPERCALL;
+ struct xen_hvm_modified_memory arg;
+ int rc;
+
+ hypercall.op = __HYPERVISOR_hvm_op;
+ hypercall.arg[0] = HVMOP_modified_memory;
+ hypercall.arg[1] = (unsigned long)&arg;
+
+ arg.domid = dom;
+ arg.first_pfn = first_pfn;
+ arg.nr = nr;
+
+ if ( (rc = lock_pages(&arg, sizeof(arg))) != 0 )
+ {
+ PERROR("Could not lock memory");
+ return rc;
+ }
+
+ rc = do_xen_hypercall(xc_handle, &hypercall);
+
+ unlock_pages(&arg, sizeof(arg));
+
+ return rc;
+}
+
void *xc_map_foreign_pages(int xc_handle, uint32_t dom, int prot,
const xen_pfn_t *arr, int num)
{
diff -r c069dbd814cd tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Fri Jun 20 18:39:37 2008 +0100
+++ b/tools/libxc/xenctrl.h Fri Jun 27 19:33:30 2008 +0100
@@ -929,6 +929,12 @@
uint64_t first_pfn, uint64_t nr,
unsigned long *bitmap);
+/*
+ * Notify that some pages got modified by the Device Model
+ */
+int xc_hvm_modified_memory(
+ int xc_handle, domid_t dom, uint64_t first_pfn, uint64_t nr);
+
typedef enum {
XC_ERROR_NONE = 0,
XC_INTERNAL_ERROR = 1,
diff -r c069dbd814cd xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Fri Jun 20 18:39:37 2008 +0100
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Fri Jun 27 19:33:30 2008 +0100
@@ -204,6 +204,53 @@
rc = -ENOSYS;
break;
+ case HVMOP_modified_memory:
+ {
+ struct xen_hvm_modified_memory a;
+ struct domain *d;
+ unsigned long pfn;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ if ( a.domid == DOMID_SELF )
+ {
+ d = rcu_lock_current_domain();
+ }
+ else
+ {
+ if ( (d = rcu_lock_domain_by_id(a.domid)) == NULL )
+ return -ESRCH;
+ if ( !IS_PRIV_FOR(current->domain, d) )
+ {
+ rc = -EPERM;
+ goto param_fail3;
+ }
+ }
+
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto param_fail3;
+
+ rc = -EINVAL;
+ if ( a.first_pfn > domain_get_maximum_gpfn(d)
+ || a.first_pfn + a.nr - 1 < a.first_pfn
+ || a.first_pfn + a.nr - 1 > domain_get_maximum_gpfn(d))
+ goto param_fail3;
+
+ rc = 0;
+ if ( !d->arch.shadow_bitmap )
+ goto param_fail3;
+
+ for (pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++)
+ if (pfn < d->arch.shadow_bitmap_size)
+ set_bit(pfn, d->arch.shadow_bitmap);
+
+ param_fail3:
+ rcu_unlock_domain(d);
+ break;
+ }
+
default:
gdprintk(XENLOG_INFO, "Bad HVM op %ld.\n", op);
rc = -ENOSYS;
diff -r c069dbd814cd xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Fri Jun 20 18:39:37 2008 +0100
+++ b/xen/arch/x86/hvm/hvm.c Fri Jun 27 19:33:30 2008 +0100
@@ -2529,6 +2529,64 @@
break;
}
+ case HVMOP_modified_memory:
+ {
+ struct xen_hvm_modified_memory a;
+ struct domain *d;
+ unsigned long pfn;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ if ( a.domid == DOMID_SELF )
+ {
+ d = rcu_lock_current_domain();
+ }
+ else
+ {
+ if ( (d = rcu_lock_domain_by_id(a.domid)) == NULL )
+ return -ESRCH;
+ if ( !IS_PRIV_FOR(current->domain, d) )
+ {
+ rc = -EPERM;
+ goto param_fail3;
+ }
+ }
+
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto param_fail3;
+
+ rc = xsm_hvm_param(d, op);
+ if ( rc )
+ goto param_fail3;
+
+ rc = -EINVAL;
+ if ( a.first_pfn > domain_get_maximum_gpfn(d)
+ || a.first_pfn + a.nr - 1 < a.first_pfn
+ || a.first_pfn + a.nr - 1 > domain_get_maximum_gpfn(d))
+ goto param_fail3;
+
+ rc = 0;
+ if ( !paging_mode_log_dirty(d) )
+ goto param_fail3;
+
+ for (pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++) {
+ p2m_type_t t;
+ mfn_t mfn = gfn_to_mfn(d, pfn, &t);
+ if (mfn_x(mfn) != INVALID_MFN) {
+ paging_mark_dirty(d, mfn_x(mfn));
+                /* These are most probably not page tables any more;
+                   don't take a long time and don't die either. */
+ sh_remove_shadows(d->vcpu[0], mfn, 1, 0);
+ }
+ }
+
+ param_fail3:
+ rcu_unlock_domain(d);
+ break;
+ }
+
default:
{
gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
diff -r c069dbd814cd xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h Fri Jun 20 18:39:37 2008 +0100
+++ b/xen/include/public/hvm/hvm_op.h Fri Jun 27 19:33:30 2008 +0100
@@ -92,6 +92,19 @@
typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
+/* Notify that some pages got modified by the Device Model. */
+#define HVMOP_modified_memory 7
+struct xen_hvm_modified_memory {
+ /* Domain to be updated. */
+ domid_t domid;
+ /* First pfn. */
+ uint64_aligned_t first_pfn;
+ /* Number of pages. */
+ uint64_aligned_t nr;
+};
+typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
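
As a side note on how callers are expected to fill in first_pfn/nr: the
range is rounded out so that every page touched by the write gets reported.
A standalone illustration (not part of the patch; page_bits/page_size stand
in for TARGET_PAGE_BITS/TARGET_PAGE_SIZE, values are made up):

#include <stdint.h>
#include <stdio.h>

/* Illustration only: pfn range reported for a write of len bytes at
 * guest-physical address addr, assuming 4KiB pages. */
int main(void)
{
    const unsigned int page_bits = 12;            /* TARGET_PAGE_BITS */
    const uint64_t page_size = 1ULL << page_bits; /* TARGET_PAGE_SIZE */
    uint64_t addr = 0x1ff0, len = 0x20;           /* write straddles two pages */

    uint64_t first_pfn = addr >> page_bits;
    uint64_t nr = ((addr + len + page_size - 1) >> page_bits) - first_pfn;

    /* Prints first_pfn=1 nr=2: pages 1 and 2 both get marked dirty. */
    printf("first_pfn=%llu nr=%llu\n",
           (unsigned long long)first_pfn, (unsigned long long)nr);
    return 0;
}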