ChangeSet 1.1709, 2005/06/10 08:04:18+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
Adding the VMCS encodings as per "Intel Virtualization Technology
Specification for the IA-32 Intel Architecture", and removing hard
coded values. Code cleanup & preparation for 64bit VMX guest support.
Signed-Off-By: Chengyuan Li <chengyuan.li@xxxxxxxxx>
Signed-Off-By: Yunhong Jiang <yunhong.jiang@xxxxxxxxx>
Signed-Off-By: Jun Nakajima <jun.nakajima@xxxxxxxxx>
Signed-Off-By: Nitin A Kamble <nitin.a.kamble@xxxxxxxxx>
arch/x86/domain.c | 2
arch/x86/vmx.c | 78 ++++++++++++++++++------------------
arch/x86/vmx_io.c | 10 ++--
arch/x86/vmx_platform.c | 14 +++---
arch/x86/vmx_vmcs.c | 14 +++---
arch/x86/x86_32/traps.c | 6 +-
include/asm-x86/vmx.h | 47 ++++++++++++++++++----
include/asm-x86/vmx_vmcs.h | 95 +++++++++++++++++++++++++++++++++++++++------
8 files changed, 185 insertions(+), 81 deletions(-)
diff -Nru a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-06-10 04:03:47 -04:00
+++ b/xen/arch/x86/domain.c 2005-06-10 04:03:47 -04:00
@@ -414,7 +414,7 @@
/* Ensure real hardware interrupts are enabled. */
v->arch.guest_context.user_regs.eflags |= EF_IE;
} else {
- __vmwrite(GUEST_EFLAGS, v->arch.guest_context.user_regs.eflags);
+ __vmwrite(GUEST_RFLAGS, v->arch.guest_context.user_regs.eflags);
if (v->arch.guest_context.user_regs.eflags & EF_TF)
__vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
else
diff -Nru a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c
--- a/xen/arch/x86/vmx.c 2005-06-10 04:03:47 -04:00
+++ b/xen/arch/x86/vmx.c 2005-06-10 04:03:47 -04:00
@@ -114,8 +114,8 @@
{
unsigned long current_eip;
- __vmread(GUEST_EIP, &current_eip);
- __vmwrite(GUEST_EIP, current_eip + inst_len);
+ __vmread(GUEST_RIP, &current_eip);
+ __vmwrite(GUEST_RIP, current_eip + inst_len);
}
@@ -128,7 +128,7 @@
#if VMX_DEBUG
{
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_VMMU,
"vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
va, eip, (unsigned long)regs->error_code);
@@ -152,7 +152,7 @@
#if 0
if ( !result )
{
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
printk("vmx pgfault to guest va=%p eip=%p\n", va, eip);
}
#endif
@@ -180,7 +180,7 @@
unsigned long eip, error_code;
unsigned long intr_fields;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
__vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
VMX_DBG_LOG(DBG_LEVEL_1,
@@ -207,7 +207,7 @@
unsigned int eax, ebx, ecx, edx;
unsigned long eip;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
@@ -245,7 +245,7 @@
struct vcpu *v = current;
unsigned long eip;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
@@ -274,7 +274,7 @@
v->arch.guest_context.debugreg[reg] = *reg_p;
else {
unsigned long value;
- __vmread(GUEST_ESP, &value);
+ __vmread(GUEST_RSP, &value);
v->arch.guest_context.debugreg[reg] = value;
}
break;
@@ -282,7 +282,7 @@
if (reg != REG_ESP)
*reg_p = v->arch.guest_context.debugreg[reg];
else {
- __vmwrite(GUEST_ESP, v->arch.guest_context.debugreg[reg]);
+ __vmwrite(GUEST_RSP, v->arch.guest_context.debugreg[reg]);
}
break;
}
@@ -297,7 +297,7 @@
unsigned long eip;
struct vcpu *v = current;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%lx, va=%lx",
eip, va);
@@ -368,9 +368,9 @@
unsigned long eip, cs, eflags;
int vm86;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
__vmread(GUEST_CS_SELECTOR, &cs);
- __vmread(GUEST_EFLAGS, &eflags);
+ __vmread(GUEST_RFLAGS, &eflags);
vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
VMX_DBG_LOG(DBG_LEVEL_1,
@@ -495,10 +495,10 @@
int error = 0;
error |= __vmread(INSTRUCTION_LEN, &inst_len);
- error |= __vmread(GUEST_EIP, &c->eip);
+ error |= __vmread(GUEST_RIP, &c->eip);
c->eip += inst_len; /* skip transition instruction */
- error |= __vmread(GUEST_ESP, &c->esp);
- error |= __vmread(GUEST_EFLAGS, &c->eflags);
+ error |= __vmread(GUEST_RSP, &c->esp);
+ error |= __vmread(GUEST_RFLAGS, &c->eflags);
error |= __vmread(CR0_READ_SHADOW, &c->cr0);
c->cr3 = d->arch.arch_vmx.cpu_cr3;
@@ -559,9 +559,9 @@
unsigned long mfn, old_cr4;
int error = 0;
- error |= __vmwrite(GUEST_EIP, c->eip);
- error |= __vmwrite(GUEST_ESP, c->esp);
- error |= __vmwrite(GUEST_EFLAGS, c->eflags);
+ error |= __vmwrite(GUEST_RIP, c->eip);
+ error |= __vmwrite(GUEST_RSP, c->esp);
+ error |= __vmwrite(GUEST_RFLAGS, c->eflags);
error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
@@ -783,25 +783,25 @@
* a partition disables the CR0.PE bit.
*/
if ((value & X86_CR0_PE) == 0) {
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"Disabling CR0.PE at %%eip 0x%lx\n", eip);
if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"Transfering control to vmxassist %%eip 0x%lx\n", eip);
return 0; /* do not update eip! */
}
} else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
&d->arch.arch_vmx.cpu_state)) {
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"Enabling CR0.PE at %%eip 0x%lx\n", eip);
if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
&d->arch.arch_vmx.cpu_state);
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"Restoring to %%eip 0x%lx\n", eip);
return 0; /* do not update eip! */
@@ -832,7 +832,7 @@
CASE_GET_REG(ESI, esi);
CASE_GET_REG(EDI, edi);
case REG_ESP:
- __vmread(GUEST_ESP, &value);
+ __vmread(GUEST_RSP, &value);
break;
default:
printk("invalid gp: %d\n", gp);
@@ -953,7 +953,7 @@
CASE_SET_REG(ESI, esi);
CASE_SET_REG(EDI, edi);
case REG_ESP:
- __vmwrite(GUEST_ESP, value);
+ __vmwrite(GUEST_RSP, value);
regs->esp = value;
break;
default:
@@ -1025,7 +1025,7 @@
{
#if VMX_DEBUG
unsigned long eip;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
#endif
VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%lx", eip);
raise_softirq(SCHEDULE_SOFTIRQ);
@@ -1035,7 +1035,7 @@
{
#if VMX_DEBUG
unsigned long eip;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
#endif
VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%lx", eip);
raise_softirq(SCHEDULE_SOFTIRQ);
@@ -1064,10 +1064,10 @@
void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
{
__vmread(GUEST_SS_SELECTOR, &ctxt->ss);
- __vmread(GUEST_ESP, &ctxt->esp);
- __vmread(GUEST_EFLAGS, &ctxt->eflags);
+ __vmread(GUEST_RSP, &ctxt->esp);
+ __vmread(GUEST_RFLAGS, &ctxt->eflags);
__vmread(GUEST_CS_SELECTOR, &ctxt->cs);
- __vmread(GUEST_EIP, &ctxt->eip);
+ __vmread(GUEST_RIP, &ctxt->eip);
__vmread(GUEST_GS_SELECTOR, &ctxt->gs);
__vmread(GUEST_FS_SELECTOR, &ctxt->fs);
@@ -1079,10 +1079,10 @@
void save_cpu_user_regs(struct cpu_user_regs *regs)
{
__vmread(GUEST_SS_SELECTOR, &regs->xss);
- __vmread(GUEST_ESP, &regs->esp);
- __vmread(GUEST_EFLAGS, &regs->eflags);
+ __vmread(GUEST_RSP, &regs->esp);
+ __vmread(GUEST_RFLAGS, &regs->eflags);
__vmread(GUEST_CS_SELECTOR, &regs->xcs);
- __vmread(GUEST_EIP, &regs->eip);
+ __vmread(GUEST_RIP, &regs->eip);
__vmread(GUEST_GS_SELECTOR, &regs->xgs);
__vmread(GUEST_FS_SELECTOR, &regs->xfs);
@@ -1093,10 +1093,10 @@
void restore_cpu_user_regs(struct cpu_user_regs *regs)
{
__vmwrite(GUEST_SS_SELECTOR, regs->xss);
- __vmwrite(GUEST_ESP, regs->esp);
- __vmwrite(GUEST_EFLAGS, regs->eflags);
+ __vmwrite(GUEST_RSP, regs->esp);
+ __vmwrite(GUEST_RFLAGS, regs->eflags);
__vmwrite(GUEST_CS_SELECTOR, regs->xcs);
- __vmwrite(GUEST_EIP, regs->eip);
+ __vmwrite(GUEST_RIP, regs->eip);
__vmwrite(GUEST_GS_SELECTOR, regs->xgs);
__vmwrite(GUEST_FS_SELECTOR, regs->xfs);
@@ -1142,7 +1142,7 @@
return;
}
- __vmread(GUEST_EIP, &eip);
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|