VMX: enforce INVEPT capability checking

Escalate to all-context invalidation when single-context invalidation
is not supported by the CPU.

Signed-off-by: Xin Li <xin.li@xxxxxxxxx>
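
For reference, below is a minimal standalone sketch (not part of the patch,
plain user-space C) of the escalation decision that the __invept() change
implements. The type and capability constants mirror the ones added in
vmx.h/vmcs.h; the capability value used in main() is a made-up example.

    /*
     * Standalone sketch, not part of the patch: models the INVEPT type
     * escalation added to __invept().  Constants mirror the patch; the
     * capability value used in main() is a made-up example.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define VMX_EPT_INVEPT_SINGLE_CONTEXT 0x02000000
    #define VMX_EPT_INVEPT_ALL_CONTEXT    0x04000000

    #define INVEPT_SINGLE_CONTEXT 1
    #define INVEPT_ALL_CONTEXT    2

    static int invept_type(uint64_t ept_vpid_cap, int requested)
    {
        /* Escalate when a single-context flush is requested but the CPU
         * does not advertise single-context invalidation. */
        if ( requested == INVEPT_SINGLE_CONTEXT &&
             !(ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT) )
            return INVEPT_ALL_CONTEXT;
        return requested;
    }

    int main(void)
    {
        /* Hypothetical CPU that only advertises all-context INVEPT. */
        uint64_t cap = VMX_EPT_INVEPT_ALL_CONTEXT;

        printf("single-context request -> type %d\n",
               invept_type(cap, INVEPT_SINGLE_CONTEXT));  /* prints 2 */
        printf("all-context request    -> type %d\n",
               invept_type(cap, INVEPT_ALL_CONTEXT));     /* prints 2 */
        return 0;
    }
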
diff -r def12332b19c xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c Thu Jun 10 11:18:02 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c Wed Jun 09 22:42:17 2010 -0700
@@ -204,11 +204,14 @@ static int vmx_init_vmcs_config(void)
* ept paging structures memory type to WB;
* 2) the CPU must support the EPT page-walk length of 4 according to
* Intel SDM 25.2.2.
+ * 3) the CPU must support INVEPT all-context invalidation, because we
+ * use it as the last resort when other types are not supported.
*
* Or we just don't use EPT.
*/
if ( !(_vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB) ||
- !(_vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) )
+ !(_vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) ||
+ !(_vmx_ept_vpid_cap & VMX_EPT_INVEPT_ALL_CONTEXT) )
_vmx_secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
}
@@ -512,7 +515,8 @@ int vmx_cpu_up(void)
hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0);
- ept_sync_all();
+ if ( cpu_has_vmx_ept )
+ ept_sync_all();
if ( cpu_has_vmx_vpid )
vpid_sync_all();
diff -r def12332b19c xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Thu Jun 10 11:18:02 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Wed Jun 09 22:42:17 2010 -0700
@@ -678,7 +678,7 @@ static void vmx_ctxt_switch_to(struct vc
/* Test-and-test-and-set this CPU in the EPT-is-synced mask. */
if ( !cpu_isset(cpu, d->arch.hvm_domain.vmx.ept_synced) &&
!cpu_test_and_set(cpu, d->arch.hvm_domain.vmx.ept_synced) )
- __invept(1, d->arch.hvm_domain.vmx.ept_control.eptp, 0);
+ __invept(INVEPT_SINGLE_CONTEXT, ept_get_eptp(d), 0);
}
vmx_restore_guest_msrs(v);
@@ -1210,7 +1210,7 @@ static void __ept_sync_domain(void *info
static void __ept_sync_domain(void *info)
{
struct domain *d = info;
- __invept(1, d->arch.hvm_domain.vmx.ept_control.eptp, 0);
+ __invept(INVEPT_SINGLE_CONTEXT, ept_get_eptp(d), 0);
}
void ept_sync_domain(struct domain *d)
diff -r def12332b19c xen/include/asm-x86/hvm/vmx/vmcs.h
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h Thu Jun 10 11:18:02 2010 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Wed Jun 09 22:42:17 2010 -0700
@@ -71,8 +71,12 @@ struct vmx_domain {
cpumask_t ept_synced;
};
-#define ept_get_wl(d) \
+#define ept_get_wl(d) \
((d)->arch.hvm_domain.vmx.ept_control.ept_wl)
+#define ept_get_asr(d) \
+ ((d)->arch.hvm_domain.vmx.ept_control.asr)
+#define ept_get_eptp(d) \
+ ((d)->arch.hvm_domain.vmx.ept_control.eptp)
struct arch_vmx_struct {
/* Virtual address of VMCS. */
@@ -181,6 +185,9 @@ extern bool_t cpu_has_vmx_ins_outs_instr
#define VMX_EPT_MEMORY_TYPE_WB 0x00004000
#define VMX_EPT_SUPERPAGE_2MB 0x00010000
#define VMX_EPT_SUPERPAGE_1GB 0x00020000
+#define VMX_EPT_INVEPT_INSTRUCTION 0x00100000
+#define VMX_EPT_INVEPT_SINGLE_CONTEXT 0x02000000
+#define VMX_EPT_INVEPT_ALL_CONTEXT 0x04000000
#define cpu_has_wbinvd_exiting \
(vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
diff -r def12332b19c xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Thu Jun 10 11:18:02 2010 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Wed Jun 09 22:42:17 2010 -0700
@@ -43,9 +43,9 @@ typedef union {
u64 epte;
} ept_entry_t;
-#define EPT_TABLE_ORDER 9
+#define EPT_TABLE_ORDER 9
#define EPTE_SUPER_PAGE_MASK 0x80
-#define EPTE_MFN_MASK 0x1fffffffffff000
+#define EPTE_MFN_MASK 0xffffffffff000ULL
#define EPTE_AVAIL1_MASK 0xF00
#define EPTE_EMT_MASK 0x38
#define EPTE_IGMT_MASK 0x40
@@ -196,7 +196,11 @@ extern u64 vmx_ept_vpid_cap;
(vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_1GB)
#define cpu_has_vmx_ept_2mb \
(vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB)
+#define cpu_has_vmx_ept_invept_single_context \
+ (vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT)
+#define INVEPT_SINGLE_CONTEXT 1
+#define INVEPT_ALL_CONTEXT 2
static inline void __vmptrld(u64 addr)
{
@@ -280,18 +284,26 @@ static inline void __vm_clear_bit(unsign
__vmwrite(field, __vmread(field) & ~(1UL << bit));
}
-static inline void __invept(int ext, u64 eptp, u64 gpa)
+static inline void __invept(int type, u64 eptp, u64 gpa)
{
struct {
u64 eptp, gpa;
} operand = {eptp, gpa};
+
+ /*
+ * If single-context invalidation is not supported, escalate to
+ * all-context invalidation.
+ */
+ if ( !cpu_has_vmx_ept_invept_single_context &&
+ (type == INVEPT_SINGLE_CONTEXT) )
+ type = INVEPT_ALL_CONTEXT;
asm volatile ( INVEPT_OPCODE
MODRM_EAX_08
/* CF==1 or ZF==1 --> crash (ud2) */
"ja 1f ; ud2 ; 1:\n"
:
- : "a" (&operand), "c" (ext)
+ : "a" (&operand), "c" (type)
: "memory" );
}
@@ -318,10 +330,7 @@ static inline void __invvpid(int ext, u1
static inline void ept_sync_all(void)
{
- if ( !current->domain->arch.hvm_domain.hap_enabled )
- return;
-
- __invept(2, 0, 0);
+ __invept(INVEPT_ALL_CONTEXT, 0, 0);
}
void ept_sync_domain(struct domain *d);
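
As an aside, the vmcs.c hunk above tightens the condition under which EPT
stays enabled. A rough user-space model of that gating check is sketched
below; the WB and all-context INVEPT bit values come from the patch, while
the walk-length-4 bit value is an assumption and the sample capability word
in main() is made up.

    /*
     * Standalone model, not Xen code: the EPT gating check from
     * vmx_init_vmcs_config().  EPT is kept enabled only if WB memory
     * type, page-walk length 4 and all-context INVEPT are all reported
     * by the EPT/VPID capability MSR.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define VMX_EPT_MEMORY_TYPE_WB          0x00004000
    #define VMX_EPT_WALK_LENGTH_4_SUPPORTED 0x00000040  /* bit value assumed */
    #define VMX_EPT_INVEPT_ALL_CONTEXT      0x04000000

    static int ept_usable(uint64_t ept_vpid_cap)
    {
        return (ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB) &&
               (ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) &&
               (ept_vpid_cap & VMX_EPT_INVEPT_ALL_CONTEXT);
    }

    int main(void)
    {
        /* Made-up capability word: WB and walk length 4 present, but no
         * all-context INVEPT, so EPT has to be disabled. */
        uint64_t cap = VMX_EPT_MEMORY_TYPE_WB | VMX_EPT_WALK_LENGTH_4_SUPPORTED;

        printf("EPT usable: %s\n", ept_usable(cap) ? "yes" : "no");
        return 0;
    }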