# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
# Date 1170852106 0
# Node ID 4d7ee9f4336ab57706bcb477542611006ea33447
# Parent 710aec0abb613a545ed9f2f41c7e130f1556494c
[HVM] Save/restore: dynamically calculate the size of the save buffer
Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxxxxx>
---
tools/libxc/xc_domain.c | 8 +++++---
tools/libxc/xc_hvm_save.c | 21 +++++++++++----------
xen/arch/x86/domctl.c | 25 ++++++++++++++++++++-----
xen/arch/x86/hvm/hpet.c | 2 +-
xen/arch/x86/hvm/hvm.c | 3 ++-
xen/arch/x86/hvm/i8254.c | 2 +-
xen/arch/x86/hvm/irq.c | 9 ++++++---
xen/arch/x86/hvm/rtc.c | 2 +-
xen/arch/x86/hvm/save.c | 27 ++++++++++++++++++++++++++-
xen/arch/x86/hvm/vioapic.c | 2 +-
xen/arch/x86/hvm/vlapic.c | 6 ++++--
xen/arch/x86/hvm/vpic.c | 2 +-
xen/include/asm-x86/hvm/support.h | 34 ++++++++++++++++++++++++----------
xen/include/public/domctl.h | 3 ++-
14 files changed, 105 insertions(+), 41 deletions(-)
diff -r 710aec0abb61 -r 4d7ee9f4336a tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c Wed Feb 07 10:21:15 2007 +0000
+++ b/tools/libxc/xc_domain.c Wed Feb 07 12:41:46 2007 +0000
@@ -252,12 +252,14 @@ int xc_domain_hvm_getcontext(int xc_hand
domctl.u.hvmcontext.size = size;
set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
- if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
- return ret;
+ if ( ctxt_buf )
+ if ( (ret = lock_pages(ctxt_buf, size)) != 0 )
+ return ret;
ret = do_domctl(xc_handle, &domctl);
- unlock_pages(ctxt_buf, size);
+ if ( ctxt_buf )
+ unlock_pages(ctxt_buf, size);
return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
}
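
With this change, passing a NULL buffer turns xc_domain_hvm_getcontext() into a pure size query, which is exactly how xc_hvm_save.c uses it below. A minimal caller sketch of the query-then-fetch pattern (the helper name fetch_hvm_context is hypothetical, not part of libxc):

    /* Hypothetical helper, not part of libxc: ask Xen how much space the
     * HVM context needs, allocate it, then fetch the state for real. */
    static int fetch_hvm_context(int xc_handle, uint32_t dom,
                                 uint8_t **buf, uint32_t *len)
    {
        int sz, ret;

        /* NULL buffer: the hypervisor only reports the size required */
        sz = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
        if ( sz == -1 )
            return -1;

        if ( (*buf = malloc(sz)) == NULL )
            return -1;

        /* Second call actually copies the state into our buffer */
        ret = xc_domain_hvm_getcontext(xc_handle, dom, *buf, sz);
        if ( ret == -1 )
        {
            free(*buf);
            return -1;
        }

        *len = ret;
        return 0;
    }
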
diff -r 710aec0abb61 -r 4d7ee9f4336a tools/libxc/xc_hvm_save.c
--- a/tools/libxc/xc_hvm_save.c Wed Feb 07 10:21:15 2007 +0000
+++ b/tools/libxc/xc_hvm_save.c Wed Feb 07 12:41:46 2007 +0000
@@ -33,12 +33,6 @@
#include "xg_save_restore.h"
/*
- * Size of a buffer big enough to take the HVM state of a domain.
- * Ought to calculate this a bit more carefully, or maybe ask Xen.
- */
-#define HVM_CTXT_SIZE 8192
-
-/*
** Default values for important tuning parameters. Can override by passing
** non-zero replacement values to xc_hvm_save().
**
@@ -286,6 +280,7 @@ int xc_hvm_save(int xc_handle, int io_fd
unsigned long *pfn_batch = NULL;
/* A copy of hvm domain context buffer*/
+ uint32_t hvm_buf_size;
uint8_t *hvm_buf = NULL;
/* Live mapping of shared info structure */
@@ -431,9 +426,15 @@ int xc_hvm_save(int xc_handle, int io_fd
page_array = (unsigned long *) malloc( sizeof(unsigned long) * max_pfn);
- hvm_buf = malloc(HVM_CTXT_SIZE);
-
- if (!to_send ||!to_skip ||!page_array ||!hvm_buf ) {
+ hvm_buf_size = xc_domain_hvm_getcontext(xc_handle, dom, 0, 0);
+ if ( hvm_buf_size == -1 )
+ {
+ ERROR("Couldn't get HVM context size from Xen");
+ goto out;
+ }
+ hvm_buf = malloc(hvm_buf_size);
+
+ if (!to_send ||!to_skip ||!page_array ||!hvm_buf) {
ERROR("Couldn't allocate memory");
goto out;
}
@@ -661,7 +662,7 @@ int xc_hvm_save(int xc_handle, int io_fd
}
if ( (rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf,
- HVM_CTXT_SIZE)) == -1) {
+ hvm_buf_size)) == -1) {
ERROR("HVM:Could not get hvm buffer");
goto out;
}
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/domctl.c Wed Feb 07 12:41:46 2007 +0000
@@ -326,10 +326,6 @@ long arch_do_domctl(
struct hvm_domain_context c;
struct domain *d;
- c.cur = 0;
- c.size = domctl->u.hvmcontext.size;
- c.data = NULL;
-
ret = -ESRCH;
if ( (d = get_domain_by_id(domctl->domain)) == NULL )
break;
@@ -338,19 +334,38 @@ long arch_do_domctl(
if ( !is_hvm_domain(d) )
goto gethvmcontext_out;
+ c.cur = 0;
+ c.size = hvm_save_size(d);
+ c.data = NULL;
+
+ if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
+ {
+ /* Client is querying for the correct buffer size */
+ domctl->u.hvmcontext.size = c.size;
+ ret = 0;
+ goto gethvmcontext_out;
+ }
+
+ /* Check that the client has a big enough buffer */
+ ret = -ENOSPC;
+ if ( domctl->u.hvmcontext.size < c.size )
+ goto gethvmcontext_out;
+
+ /* Allocate our own marshalling buffer */
ret = -ENOMEM;
if ( (c.data = xmalloc_bytes(c.size)) == NULL )
goto gethvmcontext_out;
ret = hvm_save(d, &c);
+ domctl->u.hvmcontext.size = c.cur;
if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
ret = -EFAULT;
+ gethvmcontext_out:
if ( copy_to_guest(u_domctl, domctl, 1) )
ret = -EFAULT;
- gethvmcontext_out:
if ( c.data != NULL )
xfree(c.data);
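
Stripped of the guest-handle plumbing, the reworked gethvmcontext handler above is a standard null-handle size-query protocol. A distilled sketch of its control flow (buffer_is_null() and copy_out() are illustrative stand-ins for guest_handle_is_null() and copy_to_guest(), not real Xen APIs; error codes match the handler):

    /* Distilled flow of the XEN_DOMCTL_gethvmcontext handler above;
     * domain lookup and the is_hvm_domain() check are elided. */
    long gethvmcontext_flow(struct domain *d, xen_domctl_hvmcontext_t *ctl)
    {
        struct hvm_domain_context c = { .cur = 0, .data = NULL };
        long ret;

        c.size = hvm_save_size(d);      /* exact size, computed dynamically */

        if ( buffer_is_null(ctl) )      /* NULL buffer == pure size query */
        {
            ctl->size = c.size;
            return 0;
        }

        if ( ctl->size < c.size )       /* caller's buffer is too small */
            return -ENOSPC;

        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            return -ENOMEM;

        ret = hvm_save(d, &c);          /* marshal into our own buffer... */
        ctl->size = c.cur;              /* ...then report how much was used */
        if ( copy_out(ctl, c.data, c.size) != 0 )
            ret = -EFAULT;

        xfree(c.data);
        return ret;
    }
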
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/hvm/hpet.c
--- a/xen/arch/x86/hvm/hpet.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/hvm/hpet.c Wed Feb 07 12:41:46 2007 +0000
@@ -409,7 +409,7 @@ static int hpet_load(struct domain *d, h
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load);
+HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load, 1, HVMSR_PER_DOM);
void hpet_init(struct vcpu *v)
{
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c Wed Feb 07 12:41:46 2007 +0000
@@ -227,7 +227,8 @@ static int hvm_load_cpu_ctxt(struct doma
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt);
+HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
+ 1, HVMSR_PER_VCPU);
int hvm_vcpu_initialise(struct vcpu *v)
{
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/hvm/i8254.c
--- a/xen/arch/x86/hvm/i8254.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/hvm/i8254.c Wed Feb 07 12:41:46 2007 +0000
@@ -445,7 +445,7 @@ static int pit_load(struct domain *d, hv
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load);
+HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
static void pit_reset(void *opaque)
{
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/hvm/irq.c
--- a/xen/arch/x86/hvm/irq.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/hvm/irq.c Wed Feb 07 12:41:46 2007 +0000
@@ -480,6 +480,9 @@ static int irq_load_link(struct domain *
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci);
-HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa);
-HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link);
+HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
+ 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa,
+ 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
+ 1, HVMSR_PER_DOM);
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/hvm/rtc.c
--- a/xen/arch/x86/hvm/rtc.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/hvm/rtc.c Wed Feb 07 12:41:46 2007 +0000
@@ -417,7 +417,7 @@ static int rtc_load(struct domain *d, hv
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load);
+HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load, 1, HVMSR_PER_DOM);
void rtc_init(struct vcpu *v, int base)
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/hvm/save.c
--- a/xen/arch/x86/hvm/save.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/hvm/save.c Wed Feb 07 12:41:46 2007 +0000
@@ -35,13 +35,16 @@ static struct {
hvm_save_handler save;
hvm_load_handler load;
const char *name;
+ size_t size;
+ int kind;
} hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL, "<?>"},};
/* Init-time function to add entries to that list */
void hvm_register_savevm(uint16_t typecode,
const char *name,
hvm_save_handler save_state,
- hvm_load_handler load_state)
+ hvm_load_handler load_state,
+ size_t size, int kind)
{
ASSERT(typecode <= HVM_SAVE_CODE_MAX);
ASSERT(hvm_sr_handlers[typecode].save == NULL);
@@ -49,6 +52,28 @@ void hvm_register_savevm(uint16_t typeco
hvm_sr_handlers[typecode].save = save_state;
hvm_sr_handlers[typecode].load = load_state;
hvm_sr_handlers[typecode].name = name;
+ hvm_sr_handlers[typecode].size = size;
+ hvm_sr_handlers[typecode].kind = kind;
+}
+
+size_t hvm_save_size(struct domain *d)
+{
+ struct vcpu *v;
+ size_t sz;
+ int i;
+
+ /* Basic overhead for header and footer */
+ sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);
+
+ /* Plus space for each thing we will be saving */
+ for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
+ if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
+ for_each_vcpu(d, v)
+ sz += hvm_sr_handlers[i].size;
+ else
+ sz += hvm_sr_handlers[i].size;
+
+ return sz;
}
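
The figure this returns is a worst-case bound built from the per-type maxima registered elsewhere in this changeset. A worked, symbolic example for a two-vcpu domain (actual HVM_SAVE_LENGTH values depend on the public save-format headers), writing D for sizeof(struct hvm_save_descriptor) and LEN(x) for HVM_SAVE_LENGTH(x):

    sz = 2*D + LEN(HEADER)                  /* header record + end marker */
       + 2 * (D + LEN(CPU))                 /* HVMSR_PER_VCPU: once per vcpu */
       + 2 * (D + LEN(LAPIC))
       + 2 * (D + LEN(LAPIC_REGS))
       + (D + LEN(PIT)) + (D + LEN(RTC))    /* HVMSR_PER_DOM: once each */
       + (D + LEN(HPET)) + (D + LEN(IOAPIC))
       + (D + LEN(PCI_IRQ)) + (D + LEN(ISA_IRQ)) + (D + LEN(PCI_LINK))
       + 2 * (D + LEN(PIC))                 /* vpic registers with _num == 2 */
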
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/hvm/vioapic.c Wed Feb 07 12:41:46 2007 +0000
@@ -514,7 +514,7 @@ static int ioapic_load(struct domain *d,
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load);
+HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
void vioapic_init(struct domain *d)
{
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/hvm/vlapic.c
--- a/xen/arch/x86/hvm/vlapic.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/hvm/vlapic.c Wed Feb 07 12:41:46 2007 +0000
@@ -904,8 +904,10 @@ static int lapic_load_regs(struct domain
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden);
-HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs);
+HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
+ 1, HVMSR_PER_VCPU);
+HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
+ 1, HVMSR_PER_VCPU);
int vlapic_init(struct vcpu *v)
{
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/arch/x86/hvm/vpic.c
--- a/xen/arch/x86/hvm/vpic.c Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/arch/x86/hvm/vpic.c Wed Feb 07 12:41:46 2007 +0000
@@ -440,7 +440,7 @@ static int vpic_load(struct domain *d, h
return 0;
}
-HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load);
+HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);
void vpic_init(struct domain *d)
{
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/include/asm-x86/hvm/support.h Wed Feb 07 12:41:46 2007 +0000
@@ -221,23 +221,37 @@ typedef int (*hvm_load_handler) (struct
typedef int (*hvm_load_handler) (struct domain *d,
hvm_domain_context_t *h);
-/* Init-time function to declare a pair of handlers for a type */
+/* Init-time function to declare a pair of handlers for a type,
+ * and the maximum buffer space needed to save this type of state */
void hvm_register_savevm(uint16_t typecode,
const char *name,
hvm_save_handler save_state,
- hvm_load_handler load_state);
-
-/* Syntactic sugar around that function */
-#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load) \
-static int __hvm_register_##_x##_save_and_restore(void) \
-{ \
- hvm_register_savevm(HVM_SAVE_CODE(_x), #_x, &_save, &_load); \
- return 0; \
-} \
+ hvm_load_handler load_state,
+ size_t size, int kind);
+
+/* The space needed for saving can be per-domain or per-vcpu: */
+#define HVMSR_PER_DOM 0
+#define HVMSR_PER_VCPU 1
+
+/* Syntactic sugar around that function: specify the max number of
+ * saves, and this calculates the size of buffer needed */
+#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k) \
+static int __hvm_register_##_x##_save_and_restore(void) \
+{ \
+ hvm_register_savevm(HVM_SAVE_CODE(_x), \
+ #_x, \
+ &_save, \
+ &_load, \
+ (_num) * (HVM_SAVE_LENGTH(_x) \
+ + sizeof (struct hvm_save_descriptor)), \
+ _k); \
+ return 0; \
+} \
__initcall(__hvm_register_##_x##_save_and_restore);
/* Entry points for saving and restoring HVM domain state */
+size_t hvm_save_size(struct domain *d);
int hvm_save(struct domain *d, hvm_domain_context_t *h);
int hvm_load(struct domain *d, hvm_domain_context_t *h);
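
For concreteness, the two-instance vpic.c registration in this changeset expands to approximately:

    /* Approximate preprocessor expansion of
     * HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM) */
    static int __hvm_register_PIC_save_and_restore(void)
    {
        hvm_register_savevm(HVM_SAVE_CODE(PIC),
                            "PIC",
                            &vpic_save,
                            &vpic_load,
                            (2) * (HVM_SAVE_LENGTH(PIC)
                                   + sizeof (struct hvm_save_descriptor)),
                            HVMSR_PER_DOM);
        return 0;
    }
    __initcall(__hvm_register_PIC_save_and_restore);
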
diff -r 710aec0abb61 -r 4d7ee9f4336a xen/include/public/domctl.h
--- a/xen/include/public/domctl.h Wed Feb 07 10:21:15 2007 +0000
+++ b/xen/include/public/domctl.h Wed Feb 07 12:41:46 2007 +0000
@@ -390,7 +390,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_setti
#define XEN_DOMCTL_sethvmcontext 34
typedef struct xen_domctl_hvmcontext {
uint32_t size; /* IN/OUT: size of buffer / bytes filled */
- XEN_GUEST_HANDLE(uint8_t) buffer; /* IN/OUT */
+ XEN_GUEST_HANDLE(uint8_t) buffer; /* IN/OUT: data, or call gethvmcontext
+ * with NULL buffer to get size req'd */
} xen_domctl_hvmcontext_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);