# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1186568906 -3600
# Node ID da2c7dab1a3ad37a9e28d1e5c090affc58bebc5d
# Parent 511c41a550458d50ae0adb9de2b392ae8a8e4379
hvm: More cleanups, particularly to %cr4 handling.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
Signed-off-by: Xin Li <xin.b.li@xxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 66 +++++++++++++-------
xen/arch/x86/hvm/svm/svm.c | 123 ++++++++------------------------------
xen/arch/x86/hvm/vioapic.c | 4 -
xen/arch/x86/hvm/vmx/vmx.c | 94 ++++++-----------------------
xen/arch/x86/mm/hap/hap.c | 5 -
xen/arch/x86/mm/shadow/multi.c | 11 ++-
xen/include/asm-x86/hvm/hvm.h | 13 ++--
xen/include/asm-x86/hvm/support.h | 1 +
8 files changed, 114 insertions(+), 203 deletions(-)
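
Two consolidations drive most of the churn below: the vendor hook update_guest_cr3() becomes a generic update_guest_cr(v, cr), and the near-identical SVM/VMX mov-to-CR4 emulation moves into a common hvm_set_cr4(). A minimal, compilable C sketch of the resulting shape — the struct layouts and the fake_svm_* name are simplified stand-ins, not the Xen definitions:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the Xen structures involved. */
struct vcpu {
    unsigned long guest_cr[5];  /* what the guest believes it wrote */
    unsigned long hw_cr[5];     /* what Xen actually programs */
};

struct hvm_function_table {
    /* One generic hook instead of a dedicated update_guest_cr3(). */
    void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
};

/* A vendor back-end: commits the cached hw_cr value to "hardware". */
static void fake_svm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
    switch ( cr )
    {
    case 3: printf("vmcb->cr3 = %#lx\n", v->hw_cr[3]); break;
    case 4: printf("vmcb->cr4 = %#lx\n", v->hw_cr[4]); break;
    default: assert(!"only CR3/CR4 go through this hook");
    }
}

static struct hvm_function_table hvm_funcs = {
    .update_guest_cr = fake_svm_update_guest_cr,
};

int main(void)
{
    struct vcpu v = { .hw_cr = { [3] = 0x1000, [4] = 0x20 } };
    /* Common code caches hw_cr[] first, then asks the back-end to load it. */
    hvm_funcs.update_guest_cr(&v, 3);
    hvm_funcs.update_guest_cr(&v, 4);
    return 0;
}

Common code caches the value it wants loaded in hw_cr[] and then asks the back-end to commit it, which is the calling convention hap.c and shadow/multi.c switch to below.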
diff -r 511c41a55045 -r da2c7dab1a3a xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c Wed Aug 08 11:28:26 2007 +0100
@@ -525,16 +525,9 @@ int hvm_set_cr3(unsigned long value)
     unsigned long old_base_mfn, mfn;
     struct vcpu *v = current;
 
-    if ( paging_mode_hap(v->domain) )
-    {
-        /* HAP mode. HAP-specific code does all the hard work. */
-        v->arch.hvm_vcpu.guest_cr[3] = value;
-        paging_update_cr3(v);
-    }
-    else if ( !hvm_paging_enabled(v) )
-    {
-        /* Shadow-mode, paging disabled. Just update guest CR3 value. */
-        v->arch.hvm_vcpu.guest_cr[3] = value;
+    if ( paging_mode_hap(v->domain) || !hvm_paging_enabled(v) )
+    {
+        /* Nothing to do. */
     }
     else if ( value == v->arch.hvm_vcpu.guest_cr[3] )
     {
@@ -542,7 +535,6 @@ int hvm_set_cr3(unsigned long value)
         mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
         if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
             goto bad_cr3;
-        paging_update_cr3(v);
     }
     else
     {
@@ -558,16 +550,54 @@ int hvm_set_cr3(unsigned long value)
         if ( old_base_mfn )
             put_page(mfn_to_page(old_base_mfn));
 
-        v->arch.hvm_vcpu.guest_cr[3] = value;
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
-        paging_update_cr3(v);
-    }
-
+    }
+
+    v->arch.hvm_vcpu.guest_cr[3] = value;
+    paging_update_cr3(v);
     return 1;
 
  bad_cr3:
     gdprintk(XENLOG_ERR, "Invalid CR3\n");
     domain_crash(v->domain);
+    return 0;
+}
+
+int hvm_set_cr4(unsigned long value)
+{
+    struct vcpu *v = current;
+    unsigned long old_cr;
+
+    if ( value & HVM_CR4_GUEST_RESERVED_BITS )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_1,
+                    "Guest attempts to set reserved bit in CR4: %lx",
+                    value);
+        goto gpf;
+    }
+
+    if ( !(value & X86_CR4_PAE) && hvm_long_mode_enabled(v) )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
+                    "EFER.LMA is set");
+        goto gpf;
+    }
+
+    old_cr = v->arch.hvm_vcpu.guest_cr[4];
+    v->arch.hvm_vcpu.guest_cr[4] = value;
+    v->arch.hvm_vcpu.hw_cr[4] = value | HVM_CR4_HOST_MASK;
+    if ( paging_mode_hap(v->domain) )
+        v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_PAE;
+    hvm_update_guest_cr(v, 4);
+
+    /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
+    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
+        paging_update_paging_modes(v);
+
+    return 1;
+
+ gpf:
+    hvm_inject_exception(TRAP_gp_fault, 0, 0);
     return 0;
 }
 
@@ -861,12 +891,6 @@ int hvm_do_hypercall(struct cpu_user_reg
 
     return (this_cpu(hc_preempted) ? HVM_HCALL_preempted :
             flush ? HVM_HCALL_invalidate : HVM_HCALL_completed);
-}
-
-void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
-{
-    v->arch.hvm_vcpu.hw_cr[3] = guest_cr3;
-    hvm_funcs.update_guest_cr3(v);
 }
 
 static void hvm_latch_shinfo_size(struct domain *d)
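
The new hvm_set_cr4() decides whether a CR4 write must drop cached translations by XORing the old and new values and masking for the paging-related bits. A standalone sketch of the idiom (architectural CR4 bit positions; the helper name is illustrative, not from the Xen tree):

#include <stdio.h>

#define X86_CR4_PSE (1UL << 4)  /* page size extensions */
#define X86_CR4_PAE (1UL << 5)  /* physical address extension */
#define X86_CR4_PGE (1UL << 7)  /* global-page enable */

/* Illustrative helper: XOR exposes exactly the bits that changed. */
static int cr4_write_needs_tlb_flush(unsigned long old_cr, unsigned long new_cr)
{
    return ((old_cr ^ new_cr) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) != 0;
}

int main(void)
{
    unsigned long old_cr = X86_CR4_PAE;
    printf("%d\n", cr4_write_needs_tlb_flush(old_cr, old_cr | X86_CR4_PGE)); /* 1 */
    printf("%d\n", cr4_write_needs_tlb_flush(old_cr, old_cr));               /* 0 */
    return 0;
}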
diff -r 511c41a55045 -r da2c7dab1a3a xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Wed Aug 08 11:28:26 2007 +0100
@@ -578,10 +578,20 @@ static void svm_update_host_cr3(struct v
     /* SVM doesn't have a HOST_CR3 equivalent to update. */
 }
 
-static void svm_update_guest_cr3(struct vcpu *v)
-{
-    v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
-    svm_asid_inv_asid(v);
+static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
+{
+    switch ( cr )
+    {
+    case 3:
+        v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
+        svm_asid_inv_asid(v);
+        break;
+    case 4:
+        v->arch.hvm_svm.vmcb->cr4 = v->arch.hvm_vcpu.hw_cr[4];
+        break;
+    default:
+        BUG();
+    }
 }
 
 static void svm_flush_guest_tlbs(void)
@@ -917,7 +927,7 @@ static struct hvm_function_table svm_fun
     .get_segment_base     = svm_get_segment_base,
     .get_segment_register = svm_get_segment_register,
     .update_host_cr3      = svm_update_host_cr3,
-    .update_guest_cr3     = svm_update_guest_cr3,
+    .update_guest_cr      = svm_update_guest_cr,
     .flush_guest_tlbs     = svm_flush_guest_tlbs,
     .update_vtpr          = svm_update_vtpr,
     .stts                 = svm_stts,
@@ -1684,9 +1694,6 @@ static int svm_set_cr0(unsigned long val
     return 1;
 }
 
-/*
- * Read from control registers. CR0 and CR4 are read from the shadow.
- */
 static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
 {
     unsigned long value = 0;
@@ -1725,13 +1732,9 @@ static void mov_from_cr(int cr, int gp,
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx", cr, value);
 }
 
-
-/*
- * Write to control registers
- */
 static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
 {
-    unsigned long value, old_cr;
+    unsigned long value;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -1752,69 +1755,7 @@ static int mov_to_cr(int gpreg, int cr,
         return hvm_set_cr3(value);
 
     case 4:
-        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
-        {
-            HVM_DBG_LOG(DBG_LEVEL_1,
-                        "Guest attempts to set reserved bit in CR4: %lx",
-                        value);
-            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-            break;
-        }
-
-        if ( paging_mode_hap(v->domain) )
-        {
-            v->arch.hvm_vcpu.guest_cr[4] = value;
-            vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
-            paging_update_paging_modes(v);
-            break;
-        }
-
-        old_cr = v->arch.hvm_vcpu.guest_cr[4];
-        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
-        {
-            if ( hvm_paging_enabled(v) )
-            {
-#if CONFIG_PAGING_LEVELS >= 3
-                /* The guest is a 32-bit PAE guest. */
-                unsigned long mfn, old_base_mfn;
-                mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
-                if ( !mfn_valid(mfn) ||
-                     !get_page(mfn_to_page(mfn), v->domain) )
-                    goto bad_cr3;
-
-                /*
-                 * Now arch.guest_table points to machine physical.
-                 */
-                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-                v->arch.guest_table = pagetable_from_pfn(mfn);
-                if ( old_base_mfn )
-                    put_page(mfn_to_page(old_base_mfn));
-                paging_update_paging_modes(v);
-
-                HVM_DBG_LOG(DBG_LEVEL_VMMU,
-                            "Update CR3 value = %lx, mfn = %lx",
-                            v->arch.hvm_vcpu.guest_cr[3], mfn);
-#endif
-            }
-        }
-        else if ( !(value & X86_CR4_PAE) )
-        {
-            if ( hvm_long_mode_enabled(v) )
-            {
-                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-            }
-        }
-
-        v->arch.hvm_vcpu.guest_cr[4] = value;
-        vmcb->cr4 = value | HVM_CR4_HOST_MASK;
-
-        /*
-         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
-         * all TLB entries except global entries.
-         */
-        if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
-            paging_update_paging_modes(v);
-        break;
+        return hvm_set_cr4(value);
 
     case 8:
         vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -1828,19 +1769,11 @@ static int mov_to_cr(int gpreg, int cr,
     }
 
     return 1;
-
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3\n");
-    domain_crash(v->domain);
-    return 0;
-}
-
-
-#define ARR_SIZE(x) (sizeof(x) / sizeof(x[0]))
-
-
-static int svm_cr_access(struct vcpu *v, unsigned int cr, unsigned int type,
-                         struct cpu_user_regs *regs)
+}
+
+static void svm_cr_access(
+    struct vcpu *v, unsigned int cr, unsigned int type,
+    struct cpu_user_regs *regs)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int inst_len = 0;
@@ -1865,12 +1798,12 @@ static int svm_cr_access(struct vcpu *v,
     if ( type == TYPE_MOV_TO_CR )
     {
         inst_len = __get_instruction_length_from_list(
-            v, list_a, ARR_SIZE(list_a), &buffer[index], &match);
+            v, list_a, ARRAY_SIZE(list_a), &buffer[index], &match);
     }
     else /* type == TYPE_MOV_FROM_CR */
     {
         inst_len = __get_instruction_length_from_list(
-            v, list_b, ARR_SIZE(list_b), &buffer[index], &match);
+            v, list_b, ARRAY_SIZE(list_b), &buffer[index], &match);
     }
 
     ASSERT(inst_len > 0);
@@ -1883,7 +1816,8 @@ static int svm_cr_access(struct vcpu *v,
     HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);
 
-    switch (match)
+    switch ( match )
+
     {
     case INSTR_MOV2CR:
         gpreg = decode_src_reg(prefix, buffer[index+2]);
@@ -1974,9 +1908,8 @@ static int svm_cr_access(struct vcpu *v,
 
     ASSERT(inst_len);
 
-    __update_guest_eip(vmcb, inst_len);
-
-    return result;
+    if ( result )
+        __update_guest_eip(vmcb, inst_len);
 }
 
 static void svm_do_msr_access(
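
Note the tail of svm_cr_access(): the guest RIP now advances only when emulation succeeded, so a write that faulted (e.g. a #GP injected by hvm_set_cr4()) is delivered with RIP still pointing at the mov. A toy sketch of that control flow, with invented names:

#include <stdio.h>

/* Toy stand-in: emulation returns 0 when it injected a fault instead. */
static int emulate_mov_to_cr(unsigned long value) { return value != 0xbad; }

static void cr_access(unsigned long *rip, int inst_len, unsigned long value)
{
    int result = emulate_mov_to_cr(value);
    if ( result )
        *rip += inst_len;  /* retire the instruction only on success */
    /* On failure rip is untouched: the injected fault points at the mov. */
}

int main(void)
{
    unsigned long rip = 0x1000;
    cr_access(&rip, 3, 0x1);    printf("rip = %#lx\n", rip); /* 0x1003 */
    cr_access(&rip, 3, 0xbad);  printf("rip = %#lx\n", rip); /* unchanged */
    return 0;
}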
diff -r 511c41a55045 -r da2c7dab1a3a xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/hvm/vioapic.c Wed Aug 08 11:28:26 2007 +0100
@@ -43,10 +43,6 @@
 /* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
 #define IRQ0_SPECIAL_ROUTING 1
 
-#if defined(__ia64__)
-#define opt_hvm_debug_level opt_vmx_debug_level
-#endif
-
 static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq);
 
 static unsigned long vioapic_read_indirect(struct hvm_hw_vioapic *vioapic,
diff -r 511c41a55045 -r da2c7dab1a3a xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Aug 08 11:28:26 2007 +0100
@@ -1087,11 +1087,25 @@ static void vmx_update_host_cr3(struct v
     vmx_vmcs_exit(v);
 }
 
-static void vmx_update_guest_cr3(struct vcpu *v)
+static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
 {
     ASSERT((v == current) || !vcpu_runnable(v));
+
     vmx_vmcs_enter(v);
-    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
+
+    switch ( cr )
+    {
+    case 3:
+        __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
+        break;
+    case 4:
+        __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
+        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
+        break;
+    default:
+        BUG();
+    }
+
     vmx_vmcs_exit(v);
 }
 
@@ -1157,7 +1171,7 @@ static struct hvm_function_table vmx_fun
     .get_segment_base     = vmx_get_segment_base,
     .get_segment_register = vmx_get_segment_register,
     .update_host_cr3      = vmx_update_host_cr3,
-    .update_guest_cr3     = vmx_update_guest_cr3,
+    .update_guest_cr      = vmx_update_guest_cr,
     .flush_guest_tlbs     = vmx_flush_guest_tlbs,
     .update_vtpr          = vmx_update_vtpr,
     .stts                 = vmx_stts,
@@ -2263,12 +2277,9 @@ static int vmx_set_cr0(unsigned long val
     CASE_ ## T ## ET_REG(R15, r15)
 #endif
 
-/*
- * Write to control registers
- */
 static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
 {
-    unsigned long value, old_cr;
+    unsigned long value;
     struct vcpu *v = current;
     struct vlapic *vlapic = vcpu_vlapic(v);
@@ -2303,66 +2314,7 @@ static int mov_to_cr(int gp, int cr, str
         return hvm_set_cr3(value);
 
     case 4:
-        old_cr = v->arch.hvm_vcpu.guest_cr[4];
-
-        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
-        {
-            HVM_DBG_LOG(DBG_LEVEL_1,
-                        "Guest attempts to set reserved bit in CR4: %lx",
-                        value);
-            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-            return 0;
-        }
-
-        if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
-        {
-            if ( hvm_paging_enabled(v) )
-            {
-#if CONFIG_PAGING_LEVELS >= 3
-                /* The guest is a 32-bit PAE guest. */
-                unsigned long mfn, old_base_mfn;
-                mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT);
-                if ( !mfn_valid(mfn) ||
-                     !get_page(mfn_to_page(mfn), v->domain) )
-                    goto bad_cr3;
-
-                /*
-                 * Now arch.guest_table points to machine physical.
-                 */
-                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
-                v->arch.guest_table = pagetable_from_pfn(mfn);
-                if ( old_base_mfn )
-                    put_page(mfn_to_page(old_base_mfn));
-
-                HVM_DBG_LOG(DBG_LEVEL_VMMU,
-                            "Update CR3 value = %lx, mfn = %lx",
-                            v->arch.hvm_vcpu.guest_cr[3], mfn);
-#endif
-            }
-        }
-        else if ( !(value & X86_CR4_PAE) )
-        {
-            if ( unlikely(hvm_long_mode_enabled(v)) )
-            {
-                HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
-                            "EFER.LMA is set");
-                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
-                return 0;
-            }
-        }
-
-        __vmwrite(GUEST_CR4, value | HVM_CR4_HOST_MASK);
-        v->arch.hvm_vcpu.guest_cr[4] = value;
-        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
-
-        /*
-         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
-         * all TLB entries except global entries.
-         */
-        if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
-            paging_update_paging_modes(v);
-
-        break;
+        return hvm_set_cr4(value);
 
     case 8:
         vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
@@ -2370,14 +2322,11 @@ static int mov_to_cr(int gp, int cr, str
 
     default:
         gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
-        domain_crash(v->domain);
-        return 0;
+        goto exit_and_crash;
     }
 
     return 1;
 
- bad_cr3:
-    gdprintk(XENLOG_ERR, "Invalid CR3\n");
  exit_and_crash:
     domain_crash(v->domain);
     return 0;
@@ -2438,7 +2387,8 @@ static int vmx_cr_access(unsigned long e
     unsigned long value;
     struct vcpu *v = current;
 
-    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE ) {
+    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE )
+    {
     case TYPE_MOV_TO_CR:
         gp = exit_qualification & CONTROL_REG_ACCESS_REG;
         cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
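
Both back-ends now consume the same pair of cached CR4 values: guest_cr[4], which the guest sees on reads (VMX surfaces it through CR4_READ_SHADOW), and hw_cr[4], which carries the host-mandated bits ORed in and, under HAP, has PAE stripped because the hardware paging mode belongs to Xen. A sketch of that composition — HVM_CR4_HOST_MASK is a placeholder value here, not the real Xen mask:

#include <stdio.h>

#define X86_CR4_PAE       (1UL << 5)
#define X86_CR4_VMXE      (1UL << 13)
/* Placeholder: pretend the host only forces VMXE on. */
#define HVM_CR4_HOST_MASK X86_CR4_VMXE

int main(void)
{
    unsigned long guest_cr4 = X86_CR4_PAE;   /* value the guest wrote */
    int hap = 1;                             /* hardware-assisted paging? */

    unsigned long hw_cr4 = guest_cr4 | HVM_CR4_HOST_MASK;
    if ( hap )
        hw_cr4 &= ~X86_CR4_PAE;  /* with HAP, paging mode is Xen's call */

    printf("GUEST_CR4 (hardware) = %#lx\n", hw_cr4);
    printf("CR4_READ_SHADOW      = %#lx\n", guest_cr4); /* guest reads this */
    return 0;
}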
diff -r 511c41a55045 -r da2c7dab1a3a xen/arch/x86/mm/hap/hap.c
--- a/xen/arch/x86/mm/hap/hap.c Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/mm/hap/hap.c Wed Aug 08 11:28:26 2007 +0100
@@ -605,7 +605,8 @@ static int hap_invlpg(struct vcpu *v, un
 
 static void hap_update_cr3(struct vcpu *v, int do_locking)
 {
-    hvm_update_guest_cr3(v, v->arch.hvm_vcpu.guest_cr[3]);
+    v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
+    hvm_update_guest_cr(v, 3);
 }
 
 static void hap_update_paging_modes(struct vcpu *v)
@@ -631,7 +632,7 @@ static void hap_update_paging_modes(stru
     }
 
     /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
-    hvm_update_guest_cr3(v, v->arch.hvm_vcpu.guest_cr[3]);
+    hap_update_cr3(v, 0);
 
     hap_unlock(d);
 }
diff -r 511c41a55045 -r da2c7dab1a3a xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/arch/x86/mm/shadow/multi.c Wed Aug 08 11:28:26 2007 +0100
@@ -3483,7 +3483,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
  * Paravirtual guests should set v->arch.guest_table (and guest_table_user,
  * if appropriate).
  * HVM guests should also make sure hvm_get_guest_cntl_reg(v, 3) works;
- * this function will call hvm_update_guest_cr3() to tell them where the
+ * this function will call hvm_update_guest_cr(v, 3) to tell them where the
  * shadow tables are.
  * If do_locking != 0, assume we are being called from outside the
  * shadow code, and must take and release the shadow lock; otherwise
@@ -3725,11 +3725,14 @@ sh_update_cr3(struct vcpu *v, int do_loc
         ASSERT(is_hvm_domain(d));
 #if SHADOW_PAGING_LEVELS == 3
         /* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
-        hvm_update_guest_cr3(v, virt_to_maddr(&v->arch.paging.shadow.l3table));
+        v->arch.hvm_vcpu.hw_cr[3] =
+            virt_to_maddr(&v->arch.paging.shadow.l3table);
 #else
         /* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
-        hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.shadow_table[0]));
-#endif
+        v->arch.hvm_vcpu.hw_cr[3] =
+            pagetable_get_paddr(v->arch.shadow_table[0]);
+#endif
+        hvm_update_guest_cr(v, 3);
     }
 
     /* Fix up the linear pagetable mappings */
diff -r 511c41a55045 -r da2c7dab1a3a xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h Wed Aug 08 11:28:26 2007 +0100
@@ -107,14 +107,14 @@ struct hvm_function_table {
                                  struct segment_register *reg);
 
     /*
-     * Re-set the value of CR3 that Xen runs on when handling VM exits
+     * Re-set the value of CR3 that Xen runs on when handling VM exits.
      */
     void (*update_host_cr3)(struct vcpu *v);
 
     /*
-     * Called to inform HVM layer that a guest cr3 has changed
-     */
-    void (*update_guest_cr3)(struct vcpu *v);
+     * Called to inform HVM layer that a guest control register has changed.
+     */
+    void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
 
     /*
      * Called to ensure than all guest-specific mappings in a tagged TLB
@@ -220,7 +220,10 @@ hvm_update_vtpr(struct vcpu *v, unsigned
     hvm_funcs.update_vtpr(v, value);
 }
 
-void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
+static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
+{
+    hvm_funcs.update_guest_cr(v, cr);
+}
 
 static inline void
 hvm_flush_guest_tlbs(void)
diff -r 511c41a55045 -r da2c7dab1a3a xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Aug 08 10:34:03 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h Wed Aug 08 11:28:26 2007 +0100
@@ -235,5 +235,6 @@ void hvm_triple_fault(void);
 void hvm_triple_fault(void);
 
 int hvm_set_cr3(unsigned long value);
+int hvm_set_cr4(unsigned long value);
 
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */