WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [HVM][VMX] Fix data copying in transition to/from vmxassist.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 29 Sep 2006 11:30:17 +0000
Delivery-date: Sat, 30 Sep 2006 05:51:36 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 412fc1c1bd7a8ac70dace63b84bcc075c1fb8e0b
# Parent  6d5d5b883dfcf60f0b17fcfc31269e43aecd5b0c
[HVM][VMX] Fix data copying in transition to/from vmxassist.

In vmx_assist, the copies of the new/old context and of the vmxassist magic
value all use guest physical addresses, whereas hvm_copy expects a guest
virtual address.

This can cause problems when the guest jumps directly from real mode to
paging mode.

Signed-off-by: Yunhong Jiang <yunhong.jiang@xxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c            |   81 ++++++++++++++++++++++----------------
 xen/arch/x86/hvm/vmx/vmx.c        |   14 +++---
 xen/include/asm-x86/hvm/support.h |    1 +
 3 files changed, 56 insertions(+), 40 deletions(-)
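
For illustration only, here is a minimal stand-alone C sketch of the problem
this changeset addresses (this is not Xen code; every toy_* name and every
constant below is a made-up placeholder, and only hvm_copy/hvm_copy_phy are
taken from the patch itself).  The point it demonstrates: a copy keyed on a
guest physical address works regardless of the guest's paging state, whereas
a copy keyed on a guest virtual address has nothing to translate with until
the guest's page tables exist, which is why vmx_assist's accesses to fixed
physical locations need a physical-address helper.

/*
 * Stand-alone toy model, not Xen code.  All toy_* identifiers and the
 * constants are illustrative placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE   4096UL
#define TOY_GUEST_PAGES 4UL

/* Flat model of guest physical memory. */
static uint8_t guest_phys[TOY_GUEST_PAGES * TOY_PAGE_SIZE];

/* In real mode the guest has no page tables, so there is nothing to
 * translate a virtual address with. */
static int guest_paging_enabled = 0;

enum { TOY_COPY_IN = 0, TOY_COPY_OUT };

/* Copy keyed on a guest *physical* address: valid whenever the frame exists. */
static int toy_copy_phys(void *buf, unsigned long paddr, size_t size, int dir)
{
    if (paddr + size > sizeof(guest_phys))
        return 0;
    if (dir == TOY_COPY_IN)
        memcpy(buf, &guest_phys[paddr], size);
    else
        memcpy(&guest_phys[paddr], buf, size);
    return 1;
}

/* Copy keyed on a guest *virtual* address: only meaningful once the guest
 * has page tables to walk.  An identity map stands in for a real walk. */
static int toy_copy_virt(void *buf, unsigned long vaddr, size_t size, int dir)
{
    if (!guest_paging_enabled)
        return 0;               /* no translation available yet */
    return toy_copy_phys(buf, vaddr, size, dir);
}

int main(void)
{
    uint32_t magic = 0x12345678;   /* placeholder for the vmxassist magic */
    uint32_t readback = 0;

    /* Plant the magic at a fixed guest-physical offset, as vmxassist does. */
    toy_copy_phys(&magic, 0x800, sizeof(magic), TOY_COPY_OUT);

    /* The physical path works even before guest paging is set up ... */
    printf("phys copy ok: %d\n",
           toy_copy_phys(&readback, 0x800, sizeof(readback), TOY_COPY_IN));

    /* ... while the virtual path cannot, which is the failure mode the
     * changeset avoids by introducing hvm_copy_phy(). */
    printf("virt copy ok: %d\n",
           toy_copy_virt(&readback, 0x800, sizeof(readback), TOY_COPY_IN));

    return 0;
}

Built with any C compiler, the first printf reports success and the second
reports failure until guest_paging_enabled is set, mirroring the real-mode
case described in the commit message above.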

diff -r 6d5d5b883dfc -r 412fc1c1bd7a xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Sep 29 09:29:20 2006 +0100
+++ b/xen/arch/x86/hvm/hvm.c    Fri Sep 29 10:30:18 2006 +0100
@@ -389,42 +389,57 @@ void hvm_hlt(unsigned long rflags)
 }
 
 /*
- * Copy from/to guest virtual.
+ * __hvm_copy():
+ *  @buf  = hypervisor buffer
+ *  @addr = guest virtual or physical address to copy to/from
+ *  @size = number of bytes to copy
+ *  @dir  = HVM_COPY_IN / HVM_COPY_OUT
+ *  @phy  = interpret addr as physical or virtual address?
+ * Returns TRUE on success.
  */
+static int __hvm_copy(
+    void *buf, unsigned long addr, int size, int dir, int phy)
+{
+    struct vcpu *v = current;
+    unsigned long mfn;
+    char *p;
+    int count;
+
+    while ( size > 0 )
+    {
+        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), size);
+
+        mfn = phy ? 
+            get_mfn_from_gpfn(addr >> PAGE_SHIFT) :
+            mfn_x(sh_vcpu_gfn_to_mfn(v, shadow_gva_to_gfn(v, addr)));
+        if ( mfn == INVALID_MFN )
+            return 0;
+
+        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
+
+        if ( dir == HVM_COPY_IN )
+            memcpy(buf, p, count);
+        else
+            memcpy(p, buf, count);
+
+        unmap_domain_page(p);
+
+        addr += count;
+        buf  += count;
+        size -= count;
+    }
+
+    return 1;
+}
+
+int hvm_copy_phy(void *buf, unsigned long paddr, int size, int dir)
+{
+    return __hvm_copy(buf, paddr, size, dir, 1);
+}
+
 int hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
 {
-    struct vcpu *v = current;
-    unsigned long gfn;
-    unsigned long mfn;
-    char *addr;
-    int count;
-
-    while (size > 0) {
-        count = PAGE_SIZE - (vaddr & ~PAGE_MASK);
-        if (count > size)
-            count = size;
-
-        gfn = shadow_gva_to_gfn(v, vaddr);
-        mfn = mfn_x(sh_vcpu_gfn_to_mfn(v, gfn));
-
-        if (mfn == INVALID_MFN)
-            return 0;
-
-        addr = (char *)map_domain_page(mfn) + (vaddr & ~PAGE_MASK);
-
-        if (dir == HVM_COPY_IN)
-            memcpy(buf, addr, count);
-        else
-            memcpy(addr, buf, count);
-
-        unmap_domain_page(addr);
-
-        vaddr += count;
-        buf += count;
-        size -= count;
-    }
-
-    return 1;
+    return __hvm_copy(buf, vaddr, size, dir, 0);
 }
 
 /*
diff -r 6d5d5b883dfc -r 412fc1c1bd7a xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Sep 29 09:29:20 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Sep 29 10:30:18 2006 +0100
@@ -1371,7 +1371,7 @@ static int vmx_assist(struct vcpu *v, in
     u32 cp;
 
     /* make sure vmxassist exists (this is not an error) */
-    if (!hvm_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), HVM_COPY_IN))
+    if (!hvm_copy_phy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), HVM_COPY_IN))
         return 0;
     if (magic != VMXASSIST_MAGIC)
         return 0;
@@ -1385,20 +1385,20 @@ static int vmx_assist(struct vcpu *v, in
          */
     case VMX_ASSIST_INVOKE:
         /* save the old context */
-        if (!hvm_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), HVM_COPY_IN))
+        if (!hvm_copy_phy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), HVM_COPY_IN))
             goto error;
         if (cp != 0) {
             if (!vmx_world_save(v, &c))
                 goto error;
-            if (!hvm_copy(&c, cp, sizeof(c), HVM_COPY_OUT))
+            if (!hvm_copy_phy(&c, cp, sizeof(c), HVM_COPY_OUT))
                 goto error;
         }
 
         /* restore the new context, this should activate vmxassist */
-        if (!hvm_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), HVM_COPY_IN))
+        if (!hvm_copy_phy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), HVM_COPY_IN))
             goto error;
         if (cp != 0) {
-            if (!hvm_copy(&c, cp, sizeof(c), HVM_COPY_IN))
+            if (!hvm_copy_phy(&c, cp, sizeof(c), HVM_COPY_IN))
                 goto error;
             if (!vmx_world_restore(v, &c))
                 goto error;
@@ -1412,10 +1412,10 @@ static int vmx_assist(struct vcpu *v, in
          */
     case VMX_ASSIST_RESTORE:
         /* save the old context */
-        if (!hvm_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), HVM_COPY_IN))
+        if (!hvm_copy_phy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), HVM_COPY_IN))
             goto error;
         if (cp != 0) {
-            if (!hvm_copy(&c, cp, sizeof(c), HVM_COPY_IN))
+            if (!hvm_copy_phy(&c, cp, sizeof(c), HVM_COPY_IN))
                 goto error;
             if (!vmx_world_restore(v, &c))
                 goto error;
diff -r 6d5d5b883dfc -r 412fc1c1bd7a xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Fri Sep 29 09:29:20 2006 +0100
+++ b/xen/include/asm-x86/hvm/support.h Fri Sep 29 10:30:18 2006 +0100
@@ -138,6 +138,7 @@ extern int hvm_enabled;
 
 enum { HVM_COPY_IN = 0, HVM_COPY_OUT };
 extern int hvm_copy(void *buf, unsigned long vaddr, int size, int dir);
+extern int hvm_copy_phy(void *buf, unsigned long vaddr, int size, int dir);
 
 extern void hvm_setup_platform(struct domain* d);
 extern int hvm_mmio_intercept(ioreq_t *p);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
