WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] x86:xsaveopt: Enable xsaveopt feature in

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86:xsaveopt: Enable xsaveopt feature in Xen and guest
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 23 Dec 2010 05:35:16 -0800
Delivery-date: Thu, 23 Dec 2010 05:48:12 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1292577900 0
# Node ID 7d567f932e0bc83ae84a6a41e6a85d89fb69c1b1
# Parent  f5f3cf4e001f0b6d4eaddc371958100917b7ea2c
x86:xsaveopt: Enable xsaveopt feature in Xen and guest

This patch uses "xsaveopt" instead of "xsave" if the feature is
supported in hardware to optimize task switch performance in Xen. It
also exposes the feature to guest VMs.

Signed-off-by: Zhang Fengzhe <fengzhe.zhang@xxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c     |    3 +++
 xen/arch/x86/i387.c        |   11 ++++++++++-
 xen/include/asm-x86/i387.h |   14 ++++++++++++++
 3 files changed, 27 insertions(+), 1 deletion(-)

diff -r f5f3cf4e001f -r 7d567f932e0b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Dec 16 20:07:03 2010 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Fri Dec 17 09:25:00 2010 +0000
@@ -2171,6 +2171,9 @@ void hvm_cpuid(unsigned int input, unsig
                 *ebx = XSTATE_YMM_OFFSET;
                 break;
             case 1:
+                if ( cpu_has_xsaveopt )
+                    *eax = XSAVEOPT;
+                break;
             default:
                 break;
             }
diff -r f5f3cf4e001f -r 7d567f932e0b xen/arch/x86/i387.c
--- a/xen/arch/x86/i387.c       Thu Dec 16 20:07:03 2010 +0000
+++ b/xen/arch/x86/i387.c       Fri Dec 17 09:25:00 2010 +0000
@@ -39,7 +39,10 @@ void save_init_fpu(struct vcpu *v)
          * we set all accumulated feature mask before doing save/restore.
          */
         set_xcr0(v->arch.xcr0_accum);
-        xsave(v);
+        if ( cpu_has_xsaveopt )
+            xsaveopt(v);
+        else
+            xsave(v);
         set_xcr0(v->arch.xcr0);
     }
     else if ( cpu_has_fxsr )
@@ -152,6 +155,8 @@ u64 xfeature_mask;
 /* Cached xcr0 for fast read */
 DEFINE_PER_CPU(uint64_t, xcr0);
 
+bool_t __read_mostly cpu_has_xsaveopt;
+
 void xsave_init(void)
 {
     u32 eax, ebx, ecx, edx;
@@ -196,6 +201,10 @@ void xsave_init(void)
         xfeature_mask &= XCNTXT_MASK;
         printk("%s: using cntxt_size: 0x%x and states: 0x%"PRIx64"\n",
             __func__, xsave_cntxt_size, xfeature_mask);
+
+        /* Check XSAVEOPT feature. */
+        cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
+        cpu_has_xsaveopt = !!(eax & XSAVEOPT);
     }
     else
     {
diff -r f5f3cf4e001f -r 7d567f932e0b xen/include/asm-x86/i387.h
--- a/xen/include/asm-x86/i387.h        Thu Dec 16 20:07:03 2010 +0000
+++ b/xen/include/asm-x86/i387.h        Fri Dec 17 09:25:00 2010 +0000
@@ -16,6 +16,7 @@
 
 extern unsigned int xsave_cntxt_size;
 extern u64 xfeature_mask;
+extern bool_t cpu_has_xsaveopt;
 
 void xsave_init(void);
 int xsave_alloc_save_area(struct vcpu *v);
@@ -28,6 +29,7 @@ void xsave_free_save_area(struct vcpu *v
 #define XCNTXT_MASK     (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
 #define XSTATE_YMM_OFFSET  (512 + 64)
 #define XSTATE_YMM_SIZE    256
+#define XSAVEOPT        (1 << 0)
 
 struct xsave_struct
 {
@@ -79,6 +81,18 @@ static inline void xsave(struct vcpu *v)
     ptr =(struct xsave_struct *)v->arch.xsave_area;
 
     asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
+        :
+        : "a" (-1), "d" (-1), "D"(ptr)
+        : "memory");
+}
+
+static inline void xsaveopt(struct vcpu *v)
+{
+    struct xsave_struct *ptr;
+
+    ptr =(struct xsave_struct *)v->arch.xsave_area;
+
+    asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x37"
         :
         : "a" (-1), "d" (-1), "D"(ptr)
         : "memory");

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] x86:xsaveopt: Enable xsaveopt feature in Xen and guest, Xen patchbot-unstable <=