# HG changeset patch
# User Eddie Dong <eddie.dong@xxxxxxxxx>
# Date 1307607849 -28800
# Node ID a91e0e23e2780c80fa13291027e83c961c5e385b
# Parent 3ded99964cdf2a9939f5e938ae110ee67e40412a
Nested VMX: VM exit handler of n2-guest
Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
Acked-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Committed-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
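
This patch adds nvmx_n2_vmexit_handler(), invoked at the top of the L0
VM exit handler whenever the vCPU is in L2 (nested) guest mode.  For
each exit reason it decides whether L0 handles the exit itself or
reflects it to the L1 hypervisor by setting nv_vmexit_pending.  The
recurring pattern: read L1's CPU-based execution controls from the
virtual VMCS and reflect the exit if and only if L1 enabled the
corresponding intercept.  A minimal illustrative sketch of that
pattern (the helper below is not part of the patch; only
__n2_exec_control() and nv_vmexit_pending are real):

    /* Illustrative only: reflect an exit to L1 iff L1 intercepts it. */
    static int reflect_if_l1_intercepts(struct vcpu *v, u32 ctrl_bit)
    {
        u32 ctrl = __n2_exec_control(v);    /* L1's exec controls */

        if ( ctrl & ctrl_bit )
            vcpu_nestedhvm(v).nv_vmexit_pending = 1;  /* reflect */

        return vcpu_nestedhvm(v).nv_vmexit_pending;
    }

The patch also folds the TSC_OFFSET that L1 programmed for L2 into the
hardware TSC offset whenever it is written for a vCPU in guest mode
(see nvmx_get_tsc_offset() below).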
---
diff -r 3ded99964cdf -r a91e0e23e278 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c Thu Jun 09 16:24:09 2011 +0800
@@ -942,6 +942,10 @@
 static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     vmx_vmcs_enter(v);
+
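+    /* For an L2 guest, add the TSC offset that L1 established for it. */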
+    if ( nestedhvm_vcpu_in_guestmode(v) )
+        offset += nvmx_get_tsc_offset(v);
+
     __vmwrite(TSC_OFFSET, offset);
 #if defined (__i386__)
     __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
@@ -2253,6 +2257,11 @@
      * any pending vmresume has really happened
      */
     vcpu_nestedhvm(v).nv_vmswitch_in_progress = 0;
+
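+    /*
+     * While the vCPU is in L2 guest mode, give the nested handler the
+     * first chance at this exit: a non-zero return means it has been
+     * handled here or is being reflected to L1, so the normal L0
+     * processing below is skipped.
+     */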
+    if ( nestedhvm_vcpu_in_guestmode(v) )
+    {
+        if ( nvmx_n2_vmexit_handler(regs, exit_reason) )
+            goto out;
+    }
 
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
         return vmx_failed_vmentry(exit_reason, regs);
@@ -2654,6 +2663,7 @@
         break;
     }
 
+out:
     if ( nestedhvm_vcpu_in_guestmode(v) )
         nvmx_idtv_handling();
 }
diff -r 3ded99964cdf -r a91e0e23e278 xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vvmx.c Thu Jun 09 16:24:09 2011 +0800
@@ -288,13 +288,19 @@
     if ( (regs->eflags & X86_EFLAGS_VM) ||
          (hvm_long_mode_enabled(v) && cs.attr.fields.l == 0) )
         goto invalid_op;
-    /* TODO: check vmx operation mode */
+    else if ( nestedhvm_vcpu_in_guestmode(v) )
+        goto vmexit;
 
     if ( (cs.sel & 3) > 0 )
         goto gp_fault;
 
     return X86EMUL_OKAY;
 
+vmexit:
+    gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: vmexit\n");
+    vcpu_nestedhvm(v).nv_vmexit_pending = 1;
+    return X86EMUL_EXCEPTION;
+
 invalid_op:
     gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: invalid_op\n");
     hvm_inject_exception(TRAP_invalid_op, 0, 0);
@@ -581,6 +587,18 @@
     }
 }
 
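+/*
+ * Return the TSC offset that L1 has established for L2, or zero when
+ * L1 does not use TSC offsetting.
+ */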
+u64 nvmx_get_tsc_offset(struct vcpu *v)
+{
+    u64 offset = 0;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    if ( __get_vvmcs(nvcpu->nv_vvmcx, CPU_BASED_VM_EXEC_CONTROL) &
+         CPU_BASED_USE_TSC_OFFSETING )
+        offset = __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET);
+
+    return offset;
+}
+
 /*
  * Context synchronized between shadow and virtual VMCS.
  */
@@ -730,6 +748,8 @@
     hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
     hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3));
 
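+    /* Re-sync the hardware TSC offset for the L2 context. */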
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+
     vvmcs_to_shadow(vvmcs, VM_ENTRY_INTR_INFO);
     vvmcs_to_shadow(vvmcs, VM_ENTRY_EXCEPTION_ERROR_CODE);
     vvmcs_to_shadow(vvmcs, VM_ENTRY_INSTRUCTION_LEN);
@@ -855,6 +875,8 @@
     hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4));
     hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3));
 
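+    /* Re-sync the hardware TSC offset for the L1 context. */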
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+
     __set_vvmcs(vvmcs, VM_ENTRY_INTR_INFO, 0);
 }
@@ -1252,3 +1274,195 @@
     }
 }
 
+/*
+ * L2 VM exit handling.
+ *
+ * Returns 1 when the exit has been handled here or needs handling by
+ * the L1 hypervisor (it is reflected to L1), so that the normal L0
+ * processing is skipped; returns 0 when the normal L0 processing is
+ * required.
+ */
+int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
+                           unsigned int exit_reason)
+{
+    struct vcpu *v = current;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    u32 ctrl;
+    u16 port;
+    u8 *bitmap;
+
+    nvcpu->nv_vmexit_pending = 0;
+    nvmx->intr.intr_info = 0;
+    nvmx->intr.error_code = 0;
+
+    switch ( exit_reason )
+    {
+    case EXIT_REASON_EXCEPTION_NMI:
+    {
+        u32 intr_info = __vmread(VM_EXIT_INTR_INFO);
+        u32 valid_mask = (X86_EVENTTYPE_HW_EXCEPTION << 8) |
+                         INTR_INFO_VALID_MASK;
+        u64 exec_bitmap;
+        int vector = intr_info & INTR_INFO_VECTOR_MASK;
+
+        /*
+         * Decided by the L0 and L1 exception bitmaps: if the vector is
+         * set in both, L0 has priority on #PF while L1 has priority on
+         * all other exceptions.
+         */
+        if ( vector == TRAP_page_fault )
+        {
+            if ( paging_mode_hap(v->domain) )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else if ( (intr_info & valid_mask) == valid_mask )
+        {
+            exec_bitmap = __get_vvmcs(nvcpu->nv_vvmcx, EXCEPTION_BITMAP);
+
+            if ( exec_bitmap & (1 << vector) )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        break;
+    }
+    case EXIT_REASON_WBINVD:
+    case EXIT_REASON_EPT_VIOLATION:
+    case EXIT_REASON_EPT_MISCONFIG:
+    case EXIT_REASON_EXTERNAL_INTERRUPT:
+        /* pass to L0 handler */
+        break;
+    case VMX_EXIT_REASONS_FAILED_VMENTRY:
+    case EXIT_REASON_TRIPLE_FAULT:
+    case EXIT_REASON_TASK_SWITCH:
+    case EXIT_REASON_CPUID:
+    case EXIT_REASON_MSR_READ:
+    case EXIT_REASON_MSR_WRITE:
+    case EXIT_REASON_VMCALL:
+    case EXIT_REASON_VMCLEAR:
+    case EXIT_REASON_VMLAUNCH:
+    case EXIT_REASON_VMPTRLD:
+    case EXIT_REASON_VMPTRST:
+    case EXIT_REASON_VMREAD:
+    case EXIT_REASON_VMRESUME:
+    case EXIT_REASON_VMWRITE:
+    case EXIT_REASON_VMXOFF:
+    case EXIT_REASON_VMXON:
+    case EXIT_REASON_INVEPT:
+        /* inject to L1 */
+        nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_IO_INSTRUCTION:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_ACTIVATE_IO_BITMAP )
+        {
+            port = __vmread(EXIT_QUALIFICATION) >> 16;
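+            /*
+             * L1's virtual I/O bitmap spans two pages, one bit per
+             * port: ports 0x0000-0x7fff in the first page and ports
+             * 0x8000-0xffff in the second.
+             */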
+            bitmap = nvmx->iobitmap[port >> 15];
+            if ( bitmap[(port & 0x7fff) >> 3] & (1 << (port & 0x7)) )
+                nvcpu->nv_vmexit_pending = 1;
+            if ( !nvcpu->nv_vmexit_pending )
+                gdprintk(XENLOG_WARNING, "L0 PIO %x.\n", port);
+        }
+        else if ( ctrl & CPU_BASED_UNCOND_IO_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+
+    case EXIT_REASON_PENDING_VIRT_INTR:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_VIRTUAL_INTR_PENDING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_PENDING_VIRT_NMI:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_VIRTUAL_NMI_PENDING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    /* L1 has priority in handling several other types of exits. */
+    case EXIT_REASON_HLT:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_HLT_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_RDTSC:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_RDTSC_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        else
+        {
+            uint64_t tsc;
+
+            /*
+             * Special handling is needed when L1 doesn't intercept
+             * RDTSC: return the L2 TSC directly, without touching
+             * guest_tsc, so that timekeeping in L1 is not disturbed.
+             */
+            tsc = hvm_get_guest_tsc(v);
+            tsc += __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET);
+            regs->eax = (uint32_t)tsc;
+            regs->edx = (uint32_t)(tsc >> 32);
+
+            return 1;
+        }
+        break;
+    case EXIT_REASON_RDPMC:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_RDPMC_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_MWAIT_INSTRUCTION:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_MWAIT_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_PAUSE_INSTRUCTION:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_PAUSE_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_MONITOR_INSTRUCTION:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_MONITOR_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_DR_ACCESS:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_MOV_DR_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_INVLPG:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_INVLPG_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_CR_ACCESS:
+    {
+        u64 exit_qualification = __vmread(EXIT_QUALIFICATION);
+        int cr = exit_qualification & 15;
+        int write = (exit_qualification >> 4) & 3;
+        u32 mask = 0;
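+
+        /*
+         * Exit qualification: bits 3:0 hold the CR number and bits
+         * 5:4 the access type (0 = MOV to CR, 1 = MOV from CR,
+         * 2 = CLTS, 3 = LMSW), so non-zero 'write' here means the
+         * guest is reading (storing) the control register.
+         */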
+
+        /* Consult L1's execution controls as well. */
+        ctrl = __n2_exec_control(v);
+
+        if ( cr == 3 )
+        {
+            mask = write ? CPU_BASED_CR3_STORE_EXITING
+                         : CPU_BASED_CR3_LOAD_EXITING;
+            if ( ctrl & mask )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else if ( cr == 8 )
+        {
+            mask = write ? CPU_BASED_CR8_STORE_EXITING
+                         : CPU_BASED_CR8_LOAD_EXITING;
+            if ( ctrl & mask )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else /* CR0, CR4, CLTS, LMSW */
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+    default:
+        gdprintk(XENLOG_WARNING, "Unknown nested vmexit reason %x.\n",
+                 exit_reason);
+    }
+
+    return ( nvcpu->nv_vmexit_pending == 1 );
+}
+
diff -r 3ded99964cdf -r a91e0e23e278 xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h Thu Jun 09 16:24:09 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h Thu Jun 09 16:24:09 2011 +0800
@@ -170,6 +170,9 @@
 void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value);
 asmlinkage void nvmx_switch_guest(void);
 void nvmx_idtv_handling(void);
+u64 nvmx_get_tsc_offset(struct vcpu *v);
+int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
+                           unsigned int exit_reason);
 
 #endif /* __ASM_X86_HVM_VVMX_H__ */