To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-3.1-testing] hvm: For functions which translate virtual addresses to machine
From: "Xen patchbot-3.1-testing" <patchbot-3.1-testing@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 10 Jan 2008 06:30:20 -0800
Delivery-date: Thu, 10 Jan 2008 06:30:53 -0800
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1198794744 0
# Node ID 874a20ac1ffdb23f75421900cb0113e7dc5204a9
# Parent  5e8068c541fc4123646150af21e175412cf962d2
hvm: For functions which translate virtual addresses to machine
addresses, page faults should only be raised when the gva->gfn
translation fails. These should be distinguished from gfn->mfn
translation failures.

The main effect of this is to change the behaviour of functions
derived from __hvm_copy(), which now return a three-way enumeration
and report the number of bytes left uncopied, so that callers can
inject #PF at the exact faulting address when the gva->gfn
translation fails.
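
For illustration, a minimal sketch of the new caller convention (the
wrapper name write_back_to_guest is hypothetical; the real call sites
are the io.c, platform.c, svm.c and vmx.c hunks below):

    static void write_back_to_guest(unsigned long addr, void *data, int size)
    {
        int todo;
        enum hvm_copy_result rc;

        rc = hvm_copy_to_guest_virt(addr, data, size, &todo);
        if ( rc == HVMCOPY_bad_gva_to_gfn )
        {
            /* 'todo' bytes were left uncopied: fault at the first bad byte. */
            hvm_inject_exception(TRAP_page_fault, PFEC_write_access,
                                 addr + size - todo);
        }
        /* HVMCOPY_bad_gfn_to_mfn raises no #PF: the translation succeeded
         * but hit non-RAM, which is not the guest's fault. */
    }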

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
xen-unstable changeset:   16662:e818c24cec03
xen-unstable date:        Thu Dec 27 12:00:30 2007 +0000
---
 xen/arch/x86/hvm/hvm.c            |   73 +++++++++++++++++++++++---------------
 xen/arch/x86/hvm/io.c             |   22 ++++++-----
 xen/arch/x86/hvm/platform.c       |   22 +++++++----
 xen/arch/x86/hvm/svm/svm.c        |   11 +++--
 xen/arch/x86/hvm/vmx/vmx.c        |    7 ++-
 xen/arch/x86/mm/shadow/common.c   |   18 ++++++---
 xen/arch/x86/mm/shadow/multi.c    |   35 +++++++++++-------
 xen/include/asm-x86/hvm/support.h |   33 +++++++++++++++--
 8 files changed, 148 insertions(+), 73 deletions(-)

diff -r 5e8068c541fc -r 874a20ac1ffd xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Dec 27 21:55:38 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Dec 27 22:32:24 2007 +0000
@@ -562,11 +562,12 @@ void hvm_triple_fault(void)
  *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
  * Returns number of bytes failed to copy (0 == complete success).
  */
-static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
+static enum hvm_copy_result __hvm_copy(
+    void *buf, paddr_t addr, int size, int dir, int virt, int *ptodo)
 {
     unsigned long gfn, mfn;
     char *p;
-    int count, todo;
+    int count, todo, rc = HVMCOPY_okay;
 
     todo = size;
     while ( todo > 0 )
@@ -574,14 +575,26 @@ static int __hvm_copy(void *buf, paddr_t
         count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
 
         if ( virt )
+        {
             gfn = paging_gva_to_gfn(current, addr);
+            if ( gfn == INVALID_GFN )
+            {
+                rc = HVMCOPY_bad_gva_to_gfn;
+                goto out;
+            }
+        }
         else
+        {
             gfn = addr >> PAGE_SHIFT;
-        
+        }
+
         mfn = get_mfn_from_gpfn(gfn);
 
         if ( mfn == INVALID_MFN )
-            return todo;
+        {
+            rc = HVMCOPY_bad_gfn_to_mfn;
+            goto out;
+        }
 
         p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
 
@@ -600,29 +613,35 @@ static int __hvm_copy(void *buf, paddr_t
         todo -= count;
     }
 
-    return 0;
-}
-
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
-{
-    return __hvm_copy(buf, paddr, size, 1, 0);
-}
-
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
-{
-    return __hvm_copy(buf, paddr, size, 0, 0);
-}
-
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
-{
-    return __hvm_copy(buf, vaddr, size, 1, 1);
-}
-
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
-{
-    return __hvm_copy(buf, vaddr, size, 0, 1);
-}
-
+ out:
+    if ( ptodo )
+        *ptodo = todo;
+    return rc;
+}
+
+enum hvm_copy_result hvm_copy_to_guest_phys(
+    paddr_t paddr, void *buf, int size)
+{
+    return __hvm_copy(buf, paddr, size, 1, 0, NULL);
+}
+
+enum hvm_copy_result hvm_copy_from_guest_phys(
+    void *buf, paddr_t paddr, int size)
+{
+    return __hvm_copy(buf, paddr, size, 0, 0, NULL);
+}
+
+enum hvm_copy_result hvm_copy_to_guest_virt(
+    unsigned long vaddr, void *buf, int size, int *ptodo)
+{
+    return __hvm_copy(buf, vaddr, size, 1, 1, ptodo);
+}
+
+enum hvm_copy_result hvm_copy_from_guest_virt(
+    void *buf, unsigned long vaddr, int size, int *ptodo)
+{
+    return __hvm_copy(buf, vaddr, size, 0, 1, ptodo);
+}
 
 /* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
 void hvm_print_line(struct vcpu *v, const char c)
diff -r 5e8068c541fc -r 874a20ac1ffd xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Thu Dec 27 21:55:38 2007 +0000
+++ b/xen/arch/x86/hvm/io.c     Thu Dec 27 22:32:24 2007 +0000
@@ -432,12 +432,14 @@ static void hvm_pio_assist(struct cpu_us
                 unsigned long addr = pio_opp->addr;
                 if ( hvm_paging_enabled(current) )
                 {
-                    int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
-                    if ( rv != 0 )
+                    int rv, todo;
+                    rv = hvm_copy_to_guest_virt(addr, &p->data, p->size,
+                                                &todo);
+                    if ( rv == HVMCOPY_bad_gva_to_gfn )
                     {
                         /* Failed on the page-spanning copy.  Inject PF into
                          * the guest for the address where we failed. */
-                        addr += p->size - rv;
+                        addr += p->size - todo;
                         gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side "
                                  "of a page-spanning PIO: va=%#lx\n", addr);
                         hvm_inject_exception(TRAP_page_fault,
@@ -563,12 +565,13 @@ static void hvm_mmio_assist(struct cpu_u
 
             if (hvm_paging_enabled(current))
             {
-                int rv = hvm_copy_to_guest_virt(addr, &p->data, p->size);
-                if ( rv != 0 )
+                int rv, todo;
+                rv = hvm_copy_to_guest_virt(addr, &p->data, p->size, &todo);
+                if ( rv == HVMCOPY_bad_gva_to_gfn )
                 {
                     /* Failed on the page-spanning copy.  Inject PF into
                      * the guest for the address where we failed. */
-                    addr += p->size - rv;
+                    addr += p->size - todo;
                     gdprintk(XENLOG_DEBUG, "Pagefault writing non-io side of "
                              "a page-spanning MMIO: va=%#lx\n", addr);
                     hvm_inject_exception(TRAP_page_fault,
@@ -806,10 +809,11 @@ static void hvm_mmio_assist(struct cpu_u
         mmio_opp->addr += hvm_get_segment_base(current, x86_seg_ss);
         {
             unsigned long addr = mmio_opp->addr;
-            int rv = hvm_copy_to_guest_virt(addr, &p->data, size);
-            if ( rv != 0 )
+            int rv, todo;
+            rv = hvm_copy_to_guest_virt(addr, &p->data, size, &todo);
+            if ( rv == HVMCOPY_bad_gva_to_gfn )
             {
-                addr += p->size - rv;
+                addr += p->size - todo;
                 gdprintk(XENLOG_DEBUG, "Pagefault emulating PUSH from MMIO:"
                          " va=%#lx\n", addr);
                 hvm_inject_exception(TRAP_page_fault, PFEC_write_access, addr);
diff -r 5e8068c541fc -r 874a20ac1ffd xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Thu Dec 27 21:55:38 2007 +0000
+++ b/xen/arch/x86/hvm/platform.c       Thu Dec 27 22:32:24 2007 +0000
@@ -829,11 +829,12 @@ static int mmio_decode(int address_bytes
     }
 }
 
-int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
+int inst_copy_from_guest(
+    unsigned char *buf, unsigned long guest_eip, int inst_len)
 {
     if ( inst_len > MAX_INST_LEN || inst_len <= 0 )
         return 0;
-    if ( hvm_copy_from_guest_virt(buf, guest_eip, inst_len) )
+    if ( hvm_copy_from_guest_virt(buf, guest_eip, inst_len, NULL) )
         return 0;
     return inst_len;
 }
@@ -1147,15 +1148,16 @@ void handle_mmio(paddr_t gpa)
             if ( dir == IOREQ_WRITE ) {
                 if ( hvm_paging_enabled(v) )
                 {
-                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
-                    if ( rv != 0 ) 
+                    int rv, todo;
+                    rv = hvm_copy_from_guest_virt(&value, addr, size, &todo);
+                    if ( rv == HVMCOPY_bad_gva_to_gfn ) 
                     {
                         /* Failed on the page-spanning copy.  Inject PF into
                          * the guest for the address where we failed */
                         regs->eip -= inst_len; /* do not advance %eip */
                         regs->eflags |= X86_EFLAGS_RF; /* RF was set by #PF */
                         /* Must set CR2 at the failing address */ 
-                        addr += size - rv;
+                        addr += size - todo;
                         gdprintk(XENLOG_DEBUG, "Pagefault on non-io side of a "
                                  "page-spanning MMIO: va=%#lx\n", addr);
                         hvm_inject_exception(TRAP_page_fault, 0, addr);
@@ -1319,24 +1321,30 @@ DEFINE_PER_CPU(int, guest_handles_in_xen
    this. */
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
 {
+    int todo;
+
     if ( this_cpu(guest_handles_in_xen_space) )
     {
         memcpy(to, from, len);
         return 0;
     }
 
-    return hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len);
+    hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len, &todo);
+    return todo;
 }
 
 unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
 {
+    int todo;
+
     if ( this_cpu(guest_handles_in_xen_space) )
     {
         memcpy(to, from, len);
         return 0;
     }
 
-    return hvm_copy_from_guest_virt(to, (unsigned long)from, len);
+    hvm_copy_from_guest_virt(to, (unsigned long)from, len, &todo);
+    return todo;
 }
 
 /*
diff -r 5e8068c541fc -r 874a20ac1ffd xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Dec 27 21:55:38 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Dec 27 22:32:24 2007 +0000
@@ -1602,12 +1602,13 @@ static void svm_io_instruction(struct vc
             {
                 if ( hvm_paging_enabled(current) )
                 {
-                    int rv = hvm_copy_from_guest_virt(&value, addr, size);
-                    if ( rv != 0 ) 
+                    int rv, todo;
+                    rv = hvm_copy_from_guest_virt(&value, addr, size, &todo);
+                    if ( rv == HVMCOPY_bad_gva_to_gfn ) 
                     {
                         /* Failed on the page-spanning copy.  Inject PF into
                          * the guest for the address where we failed. */
-                        addr += size - rv;
+                        addr += size - todo;
                         gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
                                  "of a page-spanning PIO: va=%#lx\n", addr);
                         svm_hvm_inject_exception(TRAP_page_fault, 0, addr);
@@ -2087,7 +2088,7 @@ static int svm_cr_access(struct vcpu *v,
             offset = ( addr_size == 4 ) ? offset : ( offset & 0xFFFF );
             addr = hvm_get_segment_base(v, seg);
             addr += offset;
-            hvm_copy_to_guest_virt(addr,&value,2);
+            hvm_copy_to_guest_virt(addr, &value, 2, NULL);
         }
         else
         {
@@ -2103,7 +2104,7 @@ static int svm_cr_access(struct vcpu *v,
     }
 
     __update_guest_eip(vmcb, inst_len);
-    
+
     return result;
 }
 
diff -r 5e8068c541fc -r 874a20ac1ffd xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Dec 27 21:55:38 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Dec 27 22:32:24 2007 +0000
@@ -1718,12 +1718,13 @@ static void vmx_send_str_pio(struct cpu_
         {
             if ( hvm_paging_enabled(current) )
             {
-                int rv = hvm_copy_from_guest_virt(&value, addr, size);
-                if ( rv != 0 )
+                int rv, todo;
+                rv = hvm_copy_from_guest_virt(&value, addr, size, &todo);
+                if ( rv == HVMCOPY_bad_gva_to_gfn )
                 {
                     /* Failed on the page-spanning copy.  Inject PF into
                      * the guest for the address where we failed. */
-                    addr += size - rv;
+                    addr += size - todo;
                     gdprintk(XENLOG_DEBUG, "Pagefault reading non-io side "
                              "of a page-spanning PIO: va=%#lx\n", addr);
                     vmx_inject_exception(TRAP_page_fault, 0, addr);
diff -r 5e8068c541fc -r 874a20ac1ffd xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Dec 27 21:55:38 2007 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Dec 27 22:32:24 2007 +0000
@@ -204,7 +204,7 @@ hvm_read(enum x86_segment seg,
          struct sh_emulate_ctxt *sh_ctxt)
 {
     unsigned long addr;
-    int rc, errcode;
+    int rc, errcode, todo;
 
     rc = hvm_translate_linear_addr(
         seg, offset, bytes, access_type, sh_ctxt, &addr);
@@ -216,8 +216,16 @@ hvm_read(enum x86_segment seg,
     //        It entirely ignores the permissions in the page tables.
     //        In this case, that is only a user vs supervisor access check.
     //
-    if ( (rc = hvm_copy_from_guest_virt(val, addr, bytes)) == 0 )
+    rc = hvm_copy_from_guest_virt(val, addr, bytes, &todo);
+    switch ( rc )
+    {
+    case HVMCOPY_okay:
         return X86EMUL_OKAY;
+    case HVMCOPY_bad_gva_to_gfn:
+        break;
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
 
     /* If we got here, there was nothing mapped here, or a bad GFN 
      * was mapped here.  This should never happen: we're here because
@@ -226,7 +234,7 @@ hvm_read(enum x86_segment seg,
     errcode = ring_3(sh_ctxt->ctxt.regs) ? PFEC_user_mode : 0;
     if ( access_type == hvm_access_insn_fetch )
         errcode |= PFEC_insn_fetch;
-    hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
+    hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - todo);
     return X86EMUL_EXCEPTION;
 }
 
@@ -458,7 +466,7 @@ struct x86_emulate_ops *shadow_init_emul
             x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
          !hvm_copy_from_guest_virt(
-             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), NULL))
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
     return &hvm_shadow_emulator_ops;
@@ -486,7 +494,7 @@ void shadow_continue_emulation(struct sh
                     x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
                     hvm_access_insn_fetch, sh_ctxt, &addr) &&
                  !hvm_copy_from_guest_virt(
-                     sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+                     sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), NULL))
                 ? sizeof(sh_ctxt->insn_buf) : 0;
             sh_ctxt->insn_buf_eip = regs->eip;
         }
diff -r 5e8068c541fc -r 874a20ac1ffd xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Dec 27 21:55:38 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Dec 27 22:32:24 2007 +0000
@@ -3954,10 +3954,13 @@ int sh_remove_l3_shadow(struct vcpu *v, 
 /* Check that the user is allowed to perform this write. 
  * Returns a mapped pointer to write to, and the mfn it's on,
  * or NULL for error. */
-static inline void * emulate_map_dest(struct vcpu *v,
-                                      unsigned long vaddr,
-                                      struct sh_emulate_ctxt *sh_ctxt,
-                                      mfn_t *mfnp)
+#define MAPPING_UNHANDLEABLE ((void *)0)
+#define MAPPING_EXCEPTION    ((void *)1)
+#define emulate_map_dest_failed(rc) ((unsigned long)(rc) <= 1)
+static inline void *emulate_map_dest(struct vcpu *v,
+                                     unsigned long vaddr,
+                                     struct sh_emulate_ctxt *sh_ctxt,
+                                     mfn_t *mfnp)
 {
     walk_t gw;
     u32 flags, errcode;
@@ -3966,7 +3969,7 @@ static inline void * emulate_map_dest(st
 
     /* We don't emulate user-mode writes to page tables */
     if ( ring_3(sh_ctxt->ctxt.regs) ) 
-        return NULL;
+        return MAPPING_UNHANDLEABLE;
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
     /* Try the virtual TLB first */
@@ -4019,14 +4022,14 @@ static inline void * emulate_map_dest(st
         return sh_map_domain_page(mfn) + (vaddr & ~PAGE_MASK);
     }
     else 
-        return NULL;
+        return MAPPING_UNHANDLEABLE;
 
  page_fault:
     if ( is_hvm_vcpu(v) )
         hvm_inject_exception(TRAP_page_fault, errcode, vaddr);
     else
         propagate_page_fault(vaddr, errcode);
-    return NULL;
+    return MAPPING_EXCEPTION;
 }
 
 static int safe_not_to_verify_write(mfn_t gmfn, void *dst, void *src, 
@@ -4071,8 +4074,10 @@ sh_x86_emulate_write(struct vcpu *v, uns
     ASSERT(shadow_locked_by_me(v->domain));
     ASSERT(((vaddr & ~PAGE_MASK) + bytes) <= PAGE_SIZE);
 
-    if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
-        return X86EMUL_EXCEPTION;
+    addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+    if ( emulate_map_dest_failed(addr) )
+        return ((addr == MAPPING_EXCEPTION) ?
+                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
 
     skip = safe_not_to_verify_write(mfn, addr, src, bytes);
     memcpy(addr, src, bytes);
@@ -4107,8 +4112,10 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     if ( vaddr & (bytes-1) )
         return X86EMUL_UNHANDLEABLE;
 
-    if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
-        return X86EMUL_EXCEPTION;
+    addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+    if ( emulate_map_dest_failed(addr) )
+        return ((addr == MAPPING_EXCEPTION) ?
+                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
 
     skip = safe_not_to_verify_write(mfn, &new, &old, bytes);
 
@@ -4163,8 +4170,10 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     if ( vaddr & 7 )
         return X86EMUL_UNHANDLEABLE;
 
-    if ( (addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn)) == NULL )
-        return X86EMUL_EXCEPTION;
+    addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);
+    if ( emulate_map_dest_failed(addr) )
+        return ((addr == MAPPING_EXCEPTION) ?
+                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
 
     old = (((u64) old_hi) << 32) | (u64) old_lo;
     new = (((u64) new_hi) << 32) | (u64) new_lo;
diff -r 5e8068c541fc -r 874a20ac1ffd xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Thu Dec 27 21:55:38 2007 +0000
+++ b/xen/include/asm-x86/hvm/support.h Thu Dec 27 22:32:24 2007 +0000
@@ -219,10 +219,35 @@ void hvm_enable(struct hvm_function_tabl
 void hvm_enable(struct hvm_function_table *);
 void hvm_disable(void);
 
-int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
-int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
-int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size);
-int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size);
+enum hvm_copy_result {
+    HVMCOPY_okay = 0,
+    HVMCOPY_bad_gva_to_gfn,
+    HVMCOPY_bad_gfn_to_mfn
+};
+
+/*
+ * Copy to/from a guest physical address.
+ * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
+ * address range does not map entirely onto ordinary machine memory.
+ */
+enum hvm_copy_result hvm_copy_to_guest_phys(
+    paddr_t paddr, void *buf, int size);
+enum hvm_copy_result hvm_copy_from_guest_phys(
+    void *buf, paddr_t paddr, int size);
+
+/*
+ * Copy to/from a guest virtual address.
+ * Returns:
+ *  HVMCOPY_okay: Copy was entirely successful.
+ *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
+ *                          ordinary machine memory.
+ *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
+ *                          mapping to a guest physical address.
+ */
+enum hvm_copy_result hvm_copy_to_guest_virt(
+    unsigned long vaddr, void *buf, int size, int *ptodo);
+enum hvm_copy_result hvm_copy_from_guest_virt(
+    void *buf, unsigned long vaddr, int size, int *ptodo);
 
 void hvm_print_line(struct vcpu *v, const char c);
 void hlt_timer_fn(void *data);
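
A usage note on the shadow-emulation hunks above: emulate_map_dest()
now encodes its two failure modes in sentinel pointer values, since 0
and 1 can never be valid mapped addresses. A condensed, hypothetical
sketch of the caller side (emulated_write distils sh_x86_emulate_write
from the multi.c hunk; write verification and unmapping are elided):

    static int emulated_write(struct vcpu *v, unsigned long vaddr,
                              void *src, u32 bytes,
                              struct sh_emulate_ctxt *sh_ctxt)
    {
        mfn_t mfn;
        void *addr = emulate_map_dest(v, vaddr, sh_ctxt, &mfn);

        /* MAPPING_EXCEPTION means a fault was already delivered;
         * MAPPING_UNHANDLEABLE means the emulator must give up. */
        if ( emulate_map_dest_failed(addr) )
            return (addr == MAPPING_EXCEPTION) ? X86EMUL_EXCEPTION
                                               : X86EMUL_UNHANDLEABLE;

        memcpy(addr, src, bytes);  /* then verify and unmap as in the hunk */
        return X86EMUL_OKAY;
    }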

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
