Keir:
While the nested virtualization wrapper is still under negotiation, here are
the global configuration parameter and the CR4 layout handling between SVM
& VMX. These are wrapper-neutral IMO.
Thx, Eddie
===Patch1
The nested virtualization usage model is emerging; however, we must guarantee
that it won't impact the performance of simple (non-nested) virtualization.
This patch adds a boot parameter for nested virtualization, which is disabled
by default for now.
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
diff -r 1385b15e168f xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Oct 06 17:38:15 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c Fri Oct 08 10:29:38 2010 +0800
@@ -66,6 +66,9 @@ unsigned int opt_hvm_debug_level __read_
unsigned int opt_hvm_debug_level __read_mostly;
integer_param("hvm_debug", opt_hvm_debug_level);
+unsigned int enable_nested_hvm __read_mostly;
+integer_param("nested_hvm", enable_nested_hvm);
+
struct hvm_function_table hvm_funcs __read_mostly;
/* I/O permission bitmap is globally shared by all HVM guests. */
diff -r 1385b15e168f xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Wed Oct 06 17:38:15 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Fri Oct 08 10:35:03 2010 +0800
@@ -250,6 +250,9 @@ hvm_set_segment_register(struct vcpu *v,
#define is_viridian_domain(_d) \
(is_hvm_domain(_d) && ((_d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN]))
+/* TODO: handle per domain configuration */
+#define is_nestedhvm(_d) (enable_nested_hvm && is_hvm_domain(_d))
+
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
void hvm_migrate_timers(struct vcpu *v);
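For reference, integer_param() wires the variable up to the hypervisor command
line, so with this patch applied the feature would be switched on by booting Xen
with nested_hvm=1. An illustrative GRUB legacy stanza (everything other than the
nested_hvm=1 token is an assumed example setup):

  title Xen (nested HVM enabled)
  kernel /boot/xen.gz nested_hvm=1
  module /boot/vmlinuz-xen root=/dev/sda1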
===Patch2
This patch handles the CR4 layout difference between AMD and Intel processors:
the VMXE bit exists only on Intel, so it must stay reserved on AMD, and on
Intel as well unless nested HVM is enabled for the domain.
Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
diff -r 1385b15e168f xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Oct 06 17:38:15 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c Fri Oct 08 10:46:55 2010 +0800
@@ -630,6 +630,11 @@ static int hvm_load_cpu_ctxt(struct doma
struct hvm_hw_cpu ctxt;
struct segment_register seg;
struct vcpu_guest_context *vc;
+ unsigned long rsv_bits = HVM_CR4_GUEST_RESERVED_BITS;
+
+ if ( !is_nestedhvm(d) ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ rsv_bits |= X86_CR4_VMXE;
/* Which vcpu is this? */
vcpuid = hvm_load_instance(h);
@@ -662,7 +667,7 @@ static int hvm_load_cpu_ctxt(struct doma
return -EINVAL;
}
- if ( ctxt.cr4 & HVM_CR4_GUEST_RESERVED_BITS )
+ if ( ctxt.cr4 & rsv_bits )
{
gdprintk(XENLOG_ERR, "HVM restore: bad CR4 0x%"PRIx64"\n",
ctxt.cr4);
@@ -1243,8 +1248,12 @@ int hvm_set_cr4(unsigned long value)
{
struct vcpu *v = current;
unsigned long old_cr;
-
- if ( value & HVM_CR4_GUEST_RESERVED_BITS )
+ unsigned long rsv_bits = HVM_CR4_GUEST_RESERVED_BITS;
+
+ if ( !is_nestedhvm(v->domain) ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ rsv_bits |= X86_CR4_VMXE;
+ if ( value & rsv_bits )
{
HVM_DBG_LOG(DBG_LEVEL_1,
"Guest attempts to set reserved bit in CR4: %lx",
diff -r 1385b15e168f xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Wed Oct 06 17:38:15 2010 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Fri Oct 08 10:46:35 2010 +0800
@@ -291,7 +291,8 @@ static inline int hvm_do_pmu_interrupt(s
X86_CR4_DE | X86_CR4_PSE | X86_CR4_PAE | \
X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE | \
X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT | \
- (cpu_has_xsave ? X86_CR4_OSXSAVE : 0))))
+ (cpu_has_xsave ? X86_CR4_OSXSAVE : 0) | \
+ X86_CR4_VMXE)))
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
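Both call sites in hvm.c above compute the same reserved-bit mask; folded into
one helper the intent is easier to see. A minimal sketch (the helper name is
hypothetical and not part of the patch; the logic is lifted verbatim from it):

/* Hypothetical helper, not in the patch: CR4.VMXE only exists on Intel
 * VMX, so keep it reserved on AMD hardware, and on Intel too unless the
 * domain is allowed to run nested HVM. */
static unsigned long hvm_cr4_guest_reserved_bits(struct domain *d)
{
    unsigned long rsv_bits = HVM_CR4_GUEST_RESERVED_BITS;

    if ( !is_nestedhvm(d) ||
         boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
        rsv_bits |= X86_CR4_VMXE;

    return rsv_bits;
}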