[Xen-devel] RFC: Nested VMX patch series 15: exit from n2 guest

From: "Dong, Eddie" <eddie.dong@xxxxxxxxx>
To: "Dong, Eddie" <eddie.dong@xxxxxxxxx>, Tim Deegan <Tim.Deegan@xxxxxxxxxx>, Keir Fraser <keir@xxxxxxx>
Cc: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>, "He, Qing" <qing.he@xxxxxxxxx>
Date: Wed, 1 Jun 2011 12:06:45 +0800

Thx, Eddie
Signed-off-by: Qing He <qing.he@xxxxxxxxx>
Signed-off-by: Eddie Dong <eddie.dong@xxxxxxxxx>
diff -r 8a54c14b7bbd xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Sat May 28 19:02:20 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vmx.c Sat May 28 19:02:28 2011 +0800
@@ -943,6 +943,10 @@
 static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     vmx_vmcs_enter(v);
+
+    if ( nestedhvm_vcpu_in_guestmode(v) )
+        offset += nvmx_get_tsc_offset(v);
+
     __vmwrite(TSC_OFFSET, offset);
 #if defined (__i386__)
     __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
@@ -2203,6 +2207,11 @@
      * any pending vmresume has really happened
      */
     vcpu_nestedhvm(v).nv_vmswitch_in_progress = 0;
+    if ( nestedhvm_vcpu_in_guestmode(v) )
+    {
+        if ( nvmx_n2_vmexit_handler(regs, exit_reason) )
+            goto out;
+    }
 
     if ( unlikely(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) )
         return vmx_failed_vmentry(exit_reason, regs);
@@ -2605,6 +2614,7 @@
         break;
     }
 
+out:
     if ( nestedhvm_vcpu_in_guestmode(v) )
         nvmx_idtv_handling();
 }
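
(For reference, not part of the patch: a minimal, self-contained sketch of the TSC arithmetic behind the vmx_set_tsc_offset() change above. All names and values below are made up; only the composition of the two offsets mirrors the patch -- when the vCPU is in L2 guest mode, the offset written to hardware is L0's offset for L1 plus L1's offset for L2, with the addition done modulo 2^64 like the real TSC_OFFSET field.)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical example values. */
    uint64_t host_tsc      = 1000000000ULL; /* raw TSC of the physical CPU      */
    uint64_t l0_tsc_offset = 0x123400ULL;   /* L0's TSC_OFFSET for the L1 guest */
    uint64_t l1_tsc_offset = 0x000560ULL;   /* L1's TSC_OFFSET for its L2 guest */

    /* What L1 observes via RDTSC: host TSC plus L0's offset. */
    uint64_t l1_tsc = host_tsc + l0_tsc_offset;

    /*
     * What L2 should observe: L1's view plus L1's own offset.  The patch
     * achieves this by adding nvmx_get_tsc_offset(v) (L1's offset, if L1
     * enabled TSC offsetting) to the offset L0 writes into the VMCS.
     */
    uint64_t l2_tsc = host_tsc + l0_tsc_offset + l1_tsc_offset;

    printf("L1 sees %llu, L2 sees %llu\n",
           (unsigned long long)l1_tsc, (unsigned long long)l2_tsc);
    return 0;
}
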
diff -r 8a54c14b7bbd xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c Sat May 28 19:02:20 2011 +0800
+++ b/xen/arch/x86/hvm/vmx/vvmx.c Sat May 28 19:02:28 2011 +0800
@@ -348,13 +348,19 @@
     if ( (regs->eflags & X86_EFLAGS_VM) ||
          (hvm_long_mode_enabled(v) && cs.attr.fields.l == 0) )
         goto invalid_op;
-    /* TODO: check vmx operation mode */
+    else if ( nestedhvm_vcpu_in_guestmode(v) )
+        goto vmexit;
 
     if ( (cs.sel & 3) > 0 )
         goto gp_fault;
 
     return X86EMUL_OKAY;
 
+vmexit:
+    gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: vmexit\n");
+    vcpu_nestedhvm(v).nv_vmexit_pending = 1;
+    return X86EMUL_EXCEPTION;
+
 invalid_op:
     gdprintk(XENLOG_ERR, "vmx_inst_check_privilege: invalid_op\n");
     hvm_inject_exception(TRAP_invalid_op, 0, 0);
@@ -610,6 +616,18 @@
     }
 }
 
+u64 nvmx_get_tsc_offset(struct vcpu *v)
+{
+    u64 offset = 0;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+    if ( __get_vvmcs(nvcpu->nv_vvmcx, CPU_BASED_VM_EXEC_CONTROL) &
+         CPU_BASED_USE_TSC_OFFSETING )
+        offset = __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET);
+
+    return offset;
+}
+
 /*
  * Context synchronized between shadow and virtual VMCS.
  */
@@ -763,6 +781,8 @@
     hvm_set_cr4(__get_vvmcs(vvmcs, GUEST_CR4));
     hvm_set_cr3(__get_vvmcs(vvmcs, GUEST_CR3));
 
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+
     vvmcs_to_shadow(vvmcs, VM_ENTRY_INTR_INFO);
     vvmcs_to_shadow(vvmcs, VM_ENTRY_EXCEPTION_ERROR_CODE);
     vvmcs_to_shadow(vvmcs, VM_ENTRY_INSTRUCTION_LEN);
@@ -891,6 +911,8 @@
     hvm_set_cr4(__get_vvmcs(vvmcs, HOST_CR4));
     hvm_set_cr3(__get_vvmcs(vvmcs, HOST_CR3));
 
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+
     __set_vvmcs(vvmcs, VM_ENTRY_INTR_INFO, 0);
 }
@@ -1289,3 +1311,252 @@
     /* TODO: NMI */
 }
 
+/*
+ * L2 VMExit handling
+ * Returns 1: the exit has been handled here, or it must be reflected to the
+ *            layer 1 hypervisor; skip the normal layer 0 handling.
+ *         0: the normal layer 0 handling is required.
+ */
+int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
+                           unsigned int exit_reason)
+{
+    struct vcpu *v = current;
+    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+    struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+    u32 ctrl;
+    u16 port;
+    u8 *bitmap;
+
+    nvcpu->nv_vmexit_pending = 0;
+    nvmx->intr.intr_info = 0;
+    nvmx->intr.error_code = 0;
+
+    switch (exit_reason) {
+    case EXIT_REASON_EXCEPTION_NMI:
+    {
+        u32 intr_info = __vmread(VM_EXIT_INTR_INFO);
+        u32 valid_mask = (X86_EVENTTYPE_HW_EXCEPTION << 8) |
+                         INTR_INFO_VALID_MASK;
+        u64 exec_bitmap;
+        int vector = intr_info & INTR_INFO_VECTOR_MASK;
+
+        /*
+         * Decided by the L0 and L1 exception bitmaps: if the vector is set
+         * in both, L0 has priority on #PF, L1 has priority on the others.
+         */
+        if ( vector == TRAP_page_fault )
+        {
+            if ( paging_mode_hap(v->domain) )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else if ( (intr_info & valid_mask) == valid_mask )
+        {
+            exec_bitmap = __get_vvmcs(nvcpu->nv_vvmcx, EXCEPTION_BITMAP);
+
+            if ( exec_bitmap & (1 << vector) )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        break;
+    }
+
+    case EXIT_REASON_WBINVD:
+    case EXIT_REASON_EPT_VIOLATION:
+    case EXIT_REASON_EPT_MISCONFIG:
+    case EXIT_REASON_EXTERNAL_INTERRUPT:
+        /* pass to L0 handler */
+        break;
+
+    case VMX_EXIT_REASONS_FAILED_VMENTRY:
+    case EXIT_REASON_TRIPLE_FAULT:
+    case EXIT_REASON_TASK_SWITCH:
+    case EXIT_REASON_CPUID:
+    case EXIT_REASON_MSR_READ:
+    case EXIT_REASON_MSR_WRITE:
+    case EXIT_REASON_VMCALL:
+    case EXIT_REASON_VMCLEAR:
+    case EXIT_REASON_VMLAUNCH:
+    case EXIT_REASON_VMPTRLD:
+    case EXIT_REASON_VMPTRST:
+    case EXIT_REASON_VMREAD:
+    case EXIT_REASON_VMRESUME:
+    case EXIT_REASON_VMWRITE:
+    case EXIT_REASON_VMXOFF:
+    case EXIT_REASON_VMXON:
+    case EXIT_REASON_INVEPT:
+        /* inject to L1 */
+        nvcpu->nv_vmexit_pending = 1;
+        break;
+    case EXIT_REASON_IO_INSTRUCTION:
+        ctrl = __n2_exec_control(v);
+        if ( ctrl & CPU_BASED_ACTIVATE_IO_BITMAP )
+        {
+            port = __vmread(EXIT_QUALIFICATION) >> 16;
+            bitmap = nvmx->iobitmap[port >> 15];
+            if ( bitmap[(port << 1) >> 4] & (1 << (port & 0x7)) )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else if ( ctrl & CPU_BASED_UNCOND_IO_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        break;
+
+    case EXIT_REASON_PENDING_VIRT_INTR:
+    {
+        ctrl = v->arch.hvm_vmx.exec_control;
+
+        /*
+         * If both L0 and L1 open the intr/nmi window, L0 has priority.
+         *
+         * Note that this is not strictly correct: in L2 context, L0's
+         * intr/nmi window flag should be replaced by MTF, causing an
+         * immediate VMExit, but MTF may not be available on all hardware.
+         */
+        if ( !(ctrl & CPU_BASED_VIRTUAL_INTR_PENDING) )
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+    case EXIT_REASON_PENDING_VIRT_NMI:
+    {
+        ctrl = v->arch.hvm_vmx.exec_control;
+
+        if ( !(ctrl & CPU_BASED_VIRTUAL_NMI_PENDING) )
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+
+    /* L1 has priority in handling several other types of exits */
+    case EXIT_REASON_HLT:
+    {
+        ctrl = __n2_exec_control(v);
+
+        if ( ctrl & CPU_BASED_HLT_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_RDTSC:
+    {
+        ctrl = __n2_exec_control(v);
+
+        if ( ctrl & CPU_BASED_RDTSC_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+        else
+        {
+            uint64_t tsc;
+
+            /*
+             * Special handling is needed if L1 doesn't intercept rdtsc,
+             * to avoid changing guest_tsc and messing up timekeeping in L1.
+             */
+            tsc = hvm_get_guest_tsc(v);
+            tsc += __get_vvmcs(nvcpu->nv_vvmcx, TSC_OFFSET);
+            regs->eax = (uint32_t)tsc;
+            regs->edx = (uint32_t)(tsc >> 32);
+
+            return 1;
+        }
+
+        break;
+    }
+
+    case EXIT_REASON_RDPMC:
+    {
+        ctrl = __n2_exec_control(v);
+
+        if ( ctrl & CPU_BASED_RDPMC_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_MWAIT_INSTRUCTION:
+    {
+        ctrl = __n2_exec_control(v);
+
+        if ( ctrl & CPU_BASED_MWAIT_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_PAUSE_INSTRUCTION:
+    {
+        ctrl = __n2_exec_control(v);
+
+        if ( ctrl & CPU_BASED_PAUSE_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_MONITOR_INSTRUCTION:
+    {
+        ctrl = __n2_exec_control(v);
+
+        if ( ctrl & CPU_BASED_MONITOR_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_DR_ACCESS:
+    {
+        ctrl = __n2_exec_control(v);
+
+        if ( ctrl & CPU_BASED_MOV_DR_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_INVLPG:
+    {
+        ctrl = __n2_exec_control(v);
+
+        if ( ctrl & CPU_BASED_INVLPG_EXITING )
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+
+    case EXIT_REASON_CR_ACCESS:
+    {
+        u64 exit_qualification = __vmread(EXIT_QUALIFICATION);
+        int cr = exit_qualification & 15;
+        int write = (exit_qualification >> 4) & 3;
+        u32 mask = 0;
+
+        /* also according to guest exec_control */
+        ctrl = __n2_exec_control(v);
+
+        if ( cr == 3 )
+        {
+            mask = write ? CPU_BASED_CR3_STORE_EXITING :
+                           CPU_BASED_CR3_LOAD_EXITING;
+            if ( ctrl & mask )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else if ( cr == 8 )
+        {
+            mask = write ? CPU_BASED_CR8_STORE_EXITING :
+                           CPU_BASED_CR8_LOAD_EXITING;
+            if ( ctrl & mask )
+                nvcpu->nv_vmexit_pending = 1;
+        }
+        else  /* CR0, CR4, CLTS, LMSW */
+            nvcpu->nv_vmexit_pending = 1;
+
+        break;
+    }
+    default:
+        gdprintk(XENLOG_WARNING, "Unknown nested vmexit reason %x.\n",
+                 exit_reason);
+    }
+
+    return ( nvcpu->nv_vmexit_pending == 1 );
+}
+
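
(For reference, not part of the patch: a compact standalone model of the decision pattern nvmx_n2_vmexit_handler() repeats for each exit reason -- look up the intercept bit in L1's CPU-based execution controls and reflect the exit to L1 only if L1 asked for it. Every type, constant and function name below is illustrative rather than Xen code; the bit positions follow the SDM's primary processor-based controls but should be checked against vmcs.h.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Primary processor-based VM-execution control bits (illustrative subset). */
#define CTRL_HLT_EXITING     (1u << 7)
#define CTRL_RDPMC_EXITING   (1u << 11)
#define CTRL_MOV_DR_EXITING  (1u << 23)

enum sample_exit { EXIT_HLT, EXIT_RDPMC, EXIT_DR_ACCESS, EXIT_EXT_INTR };

/*
 * Return true if the exit must be reflected to (injected into) L1, i.e. L1
 * requested the intercept in the execution controls of its virtual VMCS;
 * otherwise L0 handles it itself.
 */
static bool reflect_to_l1(enum sample_exit reason, uint32_t l1_exec_ctrl)
{
    switch (reason) {
    case EXIT_HLT:       return l1_exec_ctrl & CTRL_HLT_EXITING;
    case EXIT_RDPMC:     return l1_exec_ctrl & CTRL_RDPMC_EXITING;
    case EXIT_DR_ACCESS: return l1_exec_ctrl & CTRL_MOV_DR_EXITING;
    case EXIT_EXT_INTR:  return false;  /* external interrupts stay with L0 */
    }
    return false;
}

int main(void)
{
    uint32_t l1_ctrl = CTRL_HLT_EXITING;    /* pretend L1 only intercepts HLT */

    printf("HLT      -> %s\n", reflect_to_l1(EXIT_HLT, l1_ctrl) ? "L1" : "L0");
    printf("RDPMC    -> %s\n", reflect_to_l1(EXIT_RDPMC, l1_ctrl) ? "L1" : "L0");
    printf("EXT_INTR -> %s\n", reflect_to_l1(EXIT_EXT_INTR, l1_ctrl) ? "L1" : "L0");
    return 0;
}
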
diff -r 8a54c14b7bbd xen/include/asm-x86/hvm/vmx/vvmx.h
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h Sat May 28 19:02:20 2011 +0800
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h Sat May 28 19:02:28 2011 +0800
@@ -123,6 +123,9 @@
 void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value);
 asmlinkage void nvmx_switch_layed_guest(void);
 void nvmx_idtv_handling(void);
+u64 nvmx_get_tsc_offset(struct vcpu *v);
+int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
+                           unsigned int exit_reason);
 
 #endif /* __ASM_X86_HVM_VVMX_H__ */
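
(For reference, not part of the patch: the EXIT_REASON_IO_INSTRUCTION case consults L1's I/O bitmaps. The standalone sketch below illustrates the generic VMX I/O-bitmap layout described in the SDM -- bitmap A covers ports 0x0000-0x7fff, bitmap B covers ports 0x8000-0xffff, one bit per port. The arrays and helper are hypothetical, and the indexing is written in the textbook form rather than copied verbatim from the patch.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t io_bitmap_a[4096];   /* ports 0x0000-0x7fff, one bit per port */
static uint8_t io_bitmap_b[4096];   /* ports 0x8000-0xffff, one bit per port */

/* True if the given 16-bit I/O port is marked for interception. */
static bool port_intercepted(uint16_t port)
{
    const uint8_t *bitmap = (port < 0x8000) ? io_bitmap_a : io_bitmap_b;
    unsigned int byte = (port & 0x7fff) >> 3;   /* 8 ports per byte    */
    unsigned int bit  = port & 7;               /* bit within the byte */

    return bitmap[byte] & (1u << bit);
}

int main(void)
{
    io_bitmap_a[0x3f8 >> 3] |= 1u << (0x3f8 & 7);   /* intercept COM1 data port */

    printf("port 0x3f8 -> %s\n", port_intercepted(0x3f8) ? "reflect to L1" : "no exit for L1");
    printf("port 0x3f9 -> %s\n", port_intercepted(0x3f9) ? "reflect to L1" : "no exit for L1");
    return 0;
}
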