[Xen-devel] [PATCH] add SMEP support to HVM guest

To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] add SMEP support to HVM guest
From: "Li, Xin" <xin.li@xxxxxxxxx>
Date: Sun, 5 Jun 2011 08:11:10 +0800
Cc: "'Keir Fraser \(keir.xen@xxxxxxxxx\)'" <keir.xen@xxxxxxxxx>, Keir Fraser <keir@xxxxxxx>, Tim Deegan <Tim.Deegan@xxxxxxxxxx>
New Intel CPUs support SMEP (Supervisor Mode Execution Protection). SMEP
prevents software operating with CPL < 3 (supervisor mode) from fetching
instructions from any linear address with a valid translation for which the
U/S flag (bit 2) is 1 in every paging-structure entry controlling the
translation for that linear address.
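
For illustration, a minimal sketch of the condition SMEP enforces during a
translation (hypothetical helper and constant names, mirroring the PFEC_*
flags used in the patch below; this is not code from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Page-fault error-code bits, as in Xen's PFEC_* definitions. */
    #define PFEC_USER_MODE   (1u << 2)   /* access originated at CPL == 3 */
    #define PFEC_INSN_FETCH  (1u << 4)   /* access is an instruction fetch */

    /*
     * SMEP forbids an access iff it is a supervisor-mode instruction fetch
     * from a user-accessible page, i.e. U/S == 1 in every paging-structure
     * entry controlling the translation.
     */
    static bool smep_blocks_fetch(bool smep_enabled,
                                  bool user_at_every_level, uint32_t pfec)
    {
        return smep_enabled && user_at_every_level &&
               (pfec & (PFEC_INSN_FETCH | PFEC_USER_MODE)) == PFEC_INSN_FETCH;
    }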

This patch adds SMEP support to HVM guests.
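
Once the feature bit is exposed, a guest kernel enables SMEP in the usual
way: check CPUID leaf 7 (subleaf 0) EBX bit 7, then set CR4.SMEP (bit 20).
A hedged sketch, assuming cpuid_count()/read_cr4()/write_cr4() helpers in
the guest kernel:

    #include <stdint.h>

    #define X86_CR4_SMEP (1ul << 20)    /* CR4.SMEP, bit 20 per the Intel SDM */

    /* Assumed guest-kernel helpers; declarations only, not part of this patch. */
    extern void cpuid_count(uint32_t leaf, uint32_t subleaf, uint32_t *eax,
                            uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
    extern unsigned long read_cr4(void);
    extern void write_cr4(unsigned long cr4);

    static void enable_smep_if_available(void)
    {
        uint32_t eax, ebx, ecx, edx;

        /* CPUID.(EAX=7,ECX=0):EBX bit 7 advertises SMEP. */
        cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
        if ( ebx & (1u << 7) )
            write_cr4(read_cr4() | X86_CR4_SMEP);
    }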

Signed-off-by: Yang Wei <wei.y.yang@xxxxxxxxx>
Signed-off-by: Shan Haitao <haitao.shan@xxxxxxxxx>
Signed-off-by: Li Xin <xin.li@xxxxxxxxx>

diff -r 0c0884fd8b49 tools/libxc/xc_cpufeature.h
--- a/tools/libxc/xc_cpufeature.h       Fri Jun 03 21:39:00 2011 +0100
+++ b/tools/libxc/xc_cpufeature.h       Sun Jun 05 08:07:48 2011 +0800
@@ -124,5 +124,6 @@
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx) */
 #define X86_FEATURE_FSGSBASE     0 /* {RD,WR}{FS,GS}BASE instructions */
+#define X86_FEATURE_SMEP         7 /* Supervisor Mode Execution Protection */
 
 #endif /* __LIBXC_CPUFEATURE_H */
diff -r 0c0884fd8b49 tools/libxc/xc_cpuid_x86.c
--- a/tools/libxc/xc_cpuid_x86.c        Fri Jun 03 21:39:00 2011 +0100
+++ b/tools/libxc/xc_cpuid_x86.c        Sun Jun 05 08:07:48 2011 +0800
@@ -351,6 +351,14 @@ static void xc_cpuid_hvm_policy(
             clear_bit(X86_FEATURE_PAE, regs[3]);
         break;
 
+    case 0x00000007: /* Intel-defined CPU features */
+        if ( input[1] == 0 )
+            regs[1] &= bitmaskof(X86_FEATURE_SMEP);
+        else
+            regs[1] = 0;
+        regs[0] = regs[2] = regs[3] = 0;
+        break;
+
     case 0x0000000d:
         xc_cpuid_config_xsave(xch, domid, xfeature_mask, input, regs);
         break;
diff -r 0c0884fd8b49 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Jun 03 21:39:00 2011 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Sun Jun 05 08:07:48 2011 +0800
@@ -1664,7 +1664,8 @@ int hvm_set_cr4(unsigned long value)
     hvm_update_guest_cr(v, 4);
 
     /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
-    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) ) {
+    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE |
+                             X86_CR4_PAE | X86_CR4_SMEP) ) {
         if ( !nestedhvm_vmswitch_in_progress(v) && nestedhvm_vcpu_in_guestmode(v) )
             paging_update_nestedmode(v);
         else
@@ -2312,7 +2313,8 @@ enum hvm_copy_result hvm_copy_from_guest
 enum hvm_copy_result hvm_fetch_from_guest_virt(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) )
+    if ( hvm_nx_enabled(current) ||
+         (!(pfec & PFEC_user_mode) && hvm_smep_enabled(current)) )
         pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
@@ -2338,7 +2340,8 @@ enum hvm_copy_result hvm_copy_from_guest
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
     void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    if ( hvm_nx_enabled(current) )
+    if ( hvm_nx_enabled(current) ||
+         (!(pfec & PFEC_user_mode) && hvm_smep_enabled(current)) )
         pfec |= PFEC_insn_fetch;
     return __hvm_copy(buf, vaddr, size,
                       HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
@@ -2408,6 +2411,10 @@ void hvm_cpuid(unsigned int input, unsig
             *ecx |= (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE) ?
                      cpufeat_mask(X86_FEATURE_OSXSAVE) : 0;
         break;
+    case 0x7:
+        if ( (count == 0) && !cpu_has_smep )
+            *ebx &= ~cpufeat_mask(X86_FEATURE_SMEP);
+        break;
     case 0xb:
         /* Fix the x2APIC identifier. */
         *edx = v->vcpu_id * 2;
diff -r 0c0884fd8b49 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c      Fri Jun 03 21:39:00 2011 +0100
+++ b/xen/arch/x86/mm/guest_walk.c      Sun Jun 05 08:07:48 2011 +0800
@@ -131,7 +131,7 @@ guest_walk_tables(struct vcpu *v, struct
     guest_l3e_t *l3p = NULL;
     guest_l4e_t *l4p;
 #endif
-    uint32_t gflags, mflags, iflags, rc = 0;
+    uint32_t gflags, mflags, iflags, user_flag = _PAGE_USER, rc = 0;
     int pse;
 
     perfc_incr(guest_walk);
@@ -153,6 +153,7 @@ guest_walk_tables(struct vcpu *v, struct
     l4p = (guest_l4e_t *) top_map;
     gw->l4e = l4p[guest_l4_table_offset(va)];
     gflags = guest_l4e_get_flags(gw->l4e) ^ iflags;
+    user_flag &= gflags;
     rc |= ((gflags & mflags) ^ mflags);
     if ( rc & _PAGE_PRESENT ) goto out;
 
@@ -167,6 +168,7 @@ guest_walk_tables(struct vcpu *v, struct
     /* Get the l3e and check its flags*/
     gw->l3e = l3p[guest_l3_table_offset(va)];
     gflags = guest_l3e_get_flags(gw->l3e) ^ iflags;
+    user_flag &= gflags;
     rc |= ((gflags & mflags) ^ mflags);
     if ( rc & _PAGE_PRESENT )
         goto out;
@@ -204,6 +206,7 @@ guest_walk_tables(struct vcpu *v, struct
 #endif /* All levels... */
 
     gflags = guest_l2e_get_flags(gw->l2e) ^ iflags;
+    user_flag &= gflags;
     rc |= ((gflags & mflags) ^ mflags);
     if ( rc & _PAGE_PRESENT )
         goto out;
@@ -268,6 +271,7 @@ guest_walk_tables(struct vcpu *v, struct
             goto out;
         gw->l1e = l1p[guest_l1_table_offset(va)];
         gflags = guest_l1e_get_flags(gw->l1e) ^ iflags;
+        user_flag &= gflags;
         rc |= ((gflags & mflags) ^ mflags);
     }
 
@@ -277,6 +281,12 @@ guest_walk_tables(struct vcpu *v, struct
      * walkers behave this way. */
     if ( rc == 0 ) 
     {
+        if ( guest_supports_smep(v) && user_flag &&
+             ((pfec & (PFEC_insn_fetch|PFEC_user_mode)) == PFEC_insn_fetch) )
+        {
+            rc = _PAGE_SMEP;
+            goto out;
+        }
 #if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
         if ( set_ad_bits(l4p + guest_l4_table_offset(va), &gw->l4e, 0) )
             paging_mark_dirty(d, mfn_x(gw->l4mfn));
diff -r 0c0884fd8b49 xen/include/asm-x86/guest_pt.h
--- a/xen/include/asm-x86/guest_pt.h    Fri Jun 03 21:39:00 2011 +0100
+++ b/xen/include/asm-x86/guest_pt.h    Sun Jun 05 08:07:48 2011 +0800
@@ -203,6 +203,13 @@ guest_supports_nx(struct vcpu *v)
     return hvm_nx_enabled(v);
 }
 
+static inline int
+guest_supports_smep(struct vcpu *v)
+{
+    if ( !is_hvm_vcpu(v) )
+        return 0;
+    return hvm_smep_enabled(v);
+}
 
 /* Some bits are invalid in any pagetable entry. */
 #if GUEST_PAGING_LEVELS == 2
diff -r 0c0884fd8b49 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Fri Jun 03 21:39:00 2011 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h     Sun Jun 05 08:07:48 2011 +0800
@@ -212,6 +212,8 @@ int hvm_girq_dest_2_vcpu_id(struct domai
     (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_WP))
 #define hvm_pae_enabled(v) \
     (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
+#define hvm_smep_enabled(v) \
+    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_SMEP))
 #define hvm_nx_enabled(v) \
     (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
 
@@ -321,6 +323,7 @@ static inline int hvm_do_pmu_interrupt(s
         X86_CR4_DE  | X86_CR4_PSE | X86_CR4_PAE |       \
         X86_CR4_MCE | X86_CR4_PGE | X86_CR4_PCE |       \
         X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT |           \
+        (cpu_has_smep ? X86_CR4_SMEP : 0) |             \
         (xsave_enabled(_v) ? X86_CR4_OSXSAVE : 0))))
 
 /* These exceptions must always be intercepted. */
diff -r 0c0884fd8b49 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h        Fri Jun 03 21:39:00 2011 +0100
+++ b/xen/include/asm-x86/page.h        Sun Jun 05 08:07:48 2011 +0800
@@ -326,6 +326,7 @@ void setup_idle_pagetable(void);
 #define _PAGE_PSE_PAT 0x1000U
 #define _PAGE_PAGED   0x2000U
 #define _PAGE_SHARED  0x4000U
+#define _PAGE_SMEP    0x8000U
 
 /*
  * Debug option: Ensure that granted mappings are not implicitly unmapped.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
