[Xen-changelog] [xen-unstable] hvm: Clean up I/O emulation

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] hvm: Clean up I/O emulation
From: Xen patchbot-unstable <patchbot@xxxxxxx>
Date: Wed, 26 Oct 2011 20:55:10 +0100
Delivery-date: Wed, 26 Oct 2011 12:55:28 -0700
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Christoph Egger <Christoph.Egger@xxxxxxx>
# Date 1319557697 -3600
# Node ID 51a0c81a40302ac8fd0ce8066ba16d2e3fdd7664
# Parent  8943a9696358eb4f879cb64053a5f296de4ff173
hvm: Clean up I/O emulation

Move the per-vcpu HVM I/O emulation fields into a dedicated structure,
struct hvm_vcpu_io. On MMIO instruction emulation failure, print out a
few more instruction bytes (ten rather than six).

Signed-off-by: Christoph Egger <Christoph.Egger@xxxxxxx>
Committed-by: Keir Fraser <keir@xxxxxxx>
---


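In short, the mechanical pattern applied throughout the hunks below is
the following (a condensed sketch, not part of the patch itself):

    /* Before: each call site spells out the full per-vcpu path. */
    curr->arch.hvm_vcpu.io_state = HVMIO_none;
    curr->arch.hvm_vcpu.io_size  = size;

    /* After: the fields live in struct hvm_vcpu_io; a call site takes a
     * local pointer once and uses it throughout. */
    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
    vio->io_state = HVMIO_none;
    vio->io_size  = size;
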
diff -r 8943a9696358 -r 51a0c81a4030 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Tue Oct 25 16:46:47 2011 +0100
+++ b/xen/arch/x86/hvm/emulate.c        Tue Oct 25 16:48:17 2011 +0100
@@ -55,6 +55,7 @@
     paddr_t value = ram_gpa;
     int value_is_ptr = (p_data == NULL);
     struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio;
     ioreq_t *p = get_ioreq(curr);
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
@@ -90,43 +91,45 @@
         p_data = NULL;
     }
 
+    vio = &curr->arch.hvm_vcpu.hvm_io;
+
     if ( is_mmio && !value_is_ptr )
     {
         /* Part of a multi-cycle read or write? */
         if ( dir == IOREQ_WRITE )
         {
-            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_write_pa;
-            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_write_bytes;
+            paddr_t pa = vio->mmio_large_write_pa;
+            unsigned int bytes = vio->mmio_large_write_bytes;
             if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
                 return X86EMUL_OKAY;
         }
         else
         {
-            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_read_pa;
-            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
+            paddr_t pa = vio->mmio_large_read_pa;
+            unsigned int bytes = vio->mmio_large_read_bytes;
             if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
             {
-                memcpy(p_data, &curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
+                memcpy(p_data, &vio->mmio_large_read[addr - pa],
                        size);
                 return X86EMUL_OKAY;
             }
         }
     }
 
-    switch ( curr->arch.hvm_vcpu.io_state )
+    switch ( vio->io_state )
     {
     case HVMIO_none:
         break;
     case HVMIO_completed:
-        curr->arch.hvm_vcpu.io_state = HVMIO_none;
+        vio->io_state = HVMIO_none;
         if ( p_data == NULL )
             return X86EMUL_UNHANDLEABLE;
         goto finish_access;
     case HVMIO_dispatched:
         /* May have to wait for previous cycle of a multi-write to complete. */
         if ( is_mmio && !value_is_ptr && (dir == IOREQ_WRITE) &&
-             (addr == (curr->arch.hvm_vcpu.mmio_large_write_pa +
-                       curr->arch.hvm_vcpu.mmio_large_write_bytes)) )
+             (addr == (vio->mmio_large_write_pa +
+                       vio->mmio_large_write_bytes)) )
             return X86EMUL_RETRY;
     default:
         return X86EMUL_UNHANDLEABLE;
@@ -139,9 +142,9 @@
         return X86EMUL_UNHANDLEABLE;
     }
 
-    curr->arch.hvm_vcpu.io_state =
+    vio->io_state =
         (p_data == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
-    curr->arch.hvm_vcpu.io_size = size;
+    vio->io_size = size;
 
     p->dir = dir;
     p->data_is_ptr = value_is_ptr;
@@ -172,12 +175,12 @@
         *reps = p->count;
         p->state = STATE_IORESP_READY;
         hvm_io_assist();
-        curr->arch.hvm_vcpu.io_state = HVMIO_none;
+        vio->io_state = HVMIO_none;
         break;
     case X86EMUL_UNHANDLEABLE:
         rc = X86EMUL_RETRY;
         if ( !hvm_send_assist_req(curr) )
-            curr->arch.hvm_vcpu.io_state = HVMIO_none;
+            vio->io_state = HVMIO_none;
         else if ( p_data == NULL )
             rc = X86EMUL_OKAY;
         break;
@@ -190,33 +193,32 @@
 
  finish_access:
     if ( p_data != NULL )
-        memcpy(p_data, &curr->arch.hvm_vcpu.io_data, size);
+        memcpy(p_data, &vio->io_data, size);
 
     if ( is_mmio && !value_is_ptr )
     {
         /* Part of a multi-cycle read or write? */
         if ( dir == IOREQ_WRITE )
         {
-            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_write_pa;
-            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_write_bytes;
+            paddr_t pa = vio->mmio_large_write_pa;
+            unsigned int bytes = vio->mmio_large_write_bytes;
             if ( bytes == 0 )
-                pa = curr->arch.hvm_vcpu.mmio_large_write_pa = addr;
+                pa = vio->mmio_large_write_pa = addr;
             if ( addr == (pa + bytes) )
-                curr->arch.hvm_vcpu.mmio_large_write_bytes += size;
+                vio->mmio_large_write_bytes += size;
         }
         else
         {
-            paddr_t pa = curr->arch.hvm_vcpu.mmio_large_read_pa;
-            unsigned int bytes = curr->arch.hvm_vcpu.mmio_large_read_bytes;
+            paddr_t pa = vio->mmio_large_read_pa;
+            unsigned int bytes = vio->mmio_large_read_bytes;
             if ( bytes == 0 )
-                pa = curr->arch.hvm_vcpu.mmio_large_read_pa = addr;
+                pa = vio->mmio_large_read_pa = addr;
             if ( (addr == (pa + bytes)) &&
                  ((bytes + size) <
-                  sizeof(curr->arch.hvm_vcpu.mmio_large_read)) )
+                  sizeof(vio->mmio_large_read)) )
             {
-                memcpy(&curr->arch.hvm_vcpu.mmio_large_read[addr - pa],
-                       p_data, size);
-                curr->arch.hvm_vcpu.mmio_large_read_bytes += size;
+                memcpy(&vio->mmio_large_read[addr - pa], p_data, size);
+                vio->mmio_large_read_bytes += size;
             }
         }
     }
@@ -400,6 +402,7 @@
     struct vcpu *curr = current;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
     int rc;
 
@@ -408,13 +411,12 @@
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
-         curr->arch.hvm_vcpu.mmio_gva )
+    if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
     {
         unsigned int off = addr & (PAGE_SIZE - 1);
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
-        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
         if ( (off + bytes) <= PAGE_SIZE )
             return hvmemul_do_mmio(gpa, &reps, bytes, 0,
                                    IOREQ_READ, 0, p_data);
@@ -499,6 +501,7 @@
     struct vcpu *curr = current;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     paddr_t gpa;
     int rc;
 
@@ -507,11 +510,10 @@
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
-         curr->arch.hvm_vcpu.mmio_gva )
+    if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
     {
         unsigned int off = addr & (PAGE_SIZE - 1);
-        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
         if ( (off + bytes) <= PAGE_SIZE )
             return hvmemul_do_mmio(gpa, &reps, bytes, 0,
                                    IOREQ_WRITE, 0, p_data);
@@ -529,7 +531,7 @@
         return X86EMUL_EXCEPTION;
     case HVMCOPY_unhandleable:
         return X86EMUL_UNHANDLEABLE;
-    case  HVMCOPY_bad_gfn_to_mfn:
+    case HVMCOPY_bad_gfn_to_mfn:
         rc = hvmemul_linear_to_phys(
             addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
         if ( rc != X86EMUL_OKAY )
@@ -973,6 +975,7 @@
     struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
     struct vcpu *curr = current;
     uint32_t new_intr_shadow, pfec = PFEC_page_present;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     unsigned long addr;
     int rc;
 
@@ -1010,8 +1013,7 @@
     rc = x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);
 
     if ( rc != X86EMUL_RETRY )
-        curr->arch.hvm_vcpu.mmio_large_read_bytes =
-            curr->arch.hvm_vcpu.mmio_large_write_bytes = 0;
+        vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
 
     if ( rc != X86EMUL_OKAY )
         return rc;
diff -r 8943a9696358 -r 51a0c81a4030 xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Tue Oct 25 16:46:47 2011 +0100
+++ b/xen/arch/x86/hvm/io.c     Tue Oct 25 16:48:17 2011 +0100
@@ -170,28 +170,31 @@
 {
     struct hvm_emulate_ctxt ctxt;
     struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     int rc;
 
     hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
 
     rc = hvm_emulate_one(&ctxt);
 
-    if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion )
-        curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+    if ( vio->io_state == HVMIO_awaiting_completion )
+        vio->io_state = HVMIO_handle_mmio_awaiting_completion;
     else
-        curr->arch.hvm_vcpu.mmio_gva = 0;
+        vio->mmio_gva = 0;
 
     switch ( rc )
     {
     case X86EMUL_UNHANDLEABLE:
         gdprintk(XENLOG_WARNING,
                  "MMIO emulation failed @ %04x:%lx: "
-                 "%02x %02x %02x %02x %02x %02x\n",
+                 "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
                  hvmemul_get_seg_reg(x86_seg_cs, &ctxt)->sel,
                  ctxt.insn_buf_eip,
                  ctxt.insn_buf[0], ctxt.insn_buf[1],
                  ctxt.insn_buf[2], ctxt.insn_buf[3],
-                 ctxt.insn_buf[4], ctxt.insn_buf[5]);
+                 ctxt.insn_buf[4], ctxt.insn_buf[5],
+                 ctxt.insn_buf[6], ctxt.insn_buf[7],
+                 ctxt.insn_buf[8], ctxt.insn_buf[9]);
         return 0;
     case X86EMUL_EXCEPTION:
         if ( ctxt.exn_pending )
@@ -208,14 +211,16 @@
 
 int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
 {
-    current->arch.hvm_vcpu.mmio_gva = gva & PAGE_MASK;
-    current->arch.hvm_vcpu.mmio_gpfn = gpfn;
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+    vio->mmio_gva = gva & PAGE_MASK;
+    vio->mmio_gpfn = gpfn;
     return handle_mmio();
 }
 
 int handle_pio(uint16_t port, int size, int dir)
 {
     struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     unsigned long data, reps = 1;
     int rc;
 
@@ -228,15 +233,14 @@
     {
     case X86EMUL_OKAY:
         if ( dir == IOREQ_READ )
-            memcpy(&guest_cpu_user_regs()->eax,
-                   &data, curr->arch.hvm_vcpu.io_size);
+            memcpy(&guest_cpu_user_regs()->eax, &data, vio->io_size);
         break;
     case X86EMUL_RETRY:
-        if ( curr->arch.hvm_vcpu.io_state != HVMIO_awaiting_completion )
+        if ( vio->io_state != HVMIO_awaiting_completion )
             return 0;
         /* Completion in hvm_io_assist() with no re-emulation required. */
         ASSERT(dir == IOREQ_READ);
-        curr->arch.hvm_vcpu.io_state = HVMIO_handle_pio_awaiting_completion;
+        vio->io_state = HVMIO_handle_pio_awaiting_completion;
         break;
     default:
         gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
@@ -250,6 +254,7 @@
 void hvm_io_assist(void)
 {
     struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     ioreq_t *p = get_ioreq(curr);
     enum hvm_io_state io_state;
 
@@ -257,23 +262,23 @@
 
     p->state = STATE_IOREQ_NONE;
 
-    io_state = curr->arch.hvm_vcpu.io_state;
-    curr->arch.hvm_vcpu.io_state = HVMIO_none;
+    io_state = vio->io_state;
+    vio->io_state = HVMIO_none;
 
     switch ( io_state )
     {
     case HVMIO_awaiting_completion:
-        curr->arch.hvm_vcpu.io_state = HVMIO_completed;
-        curr->arch.hvm_vcpu.io_data = p->data;
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
         break;
     case HVMIO_handle_mmio_awaiting_completion:
-        curr->arch.hvm_vcpu.io_state = HVMIO_completed;
-        curr->arch.hvm_vcpu.io_data = p->data;
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
         (void)handle_mmio();
         break;
     case HVMIO_handle_pio_awaiting_completion:
         memcpy(&guest_cpu_user_regs()->eax,
-               &p->data, curr->arch.hvm_vcpu.io_size);
+               &p->data, vio->io_size);
         break;
     default:
         break;
diff -r 8943a9696358 -r 51a0c81a4030 xen/arch/x86/hvm/svm/nestedsvm.c
--- a/xen/arch/x86/hvm/svm/nestedsvm.c  Tue Oct 25 16:46:47 2011 +0100
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c  Tue Oct 25 16:48:17 2011 +0100
@@ -1168,7 +1168,7 @@
          * Delay the injection because this would result in delivering
          * an interrupt *within* the execution of an instruction.
          */
-        if ( v->arch.hvm_vcpu.io_state != HVMIO_none )
+        if ( v->arch.hvm_vcpu.hvm_io.io_state != HVMIO_none )
             return hvm_intblk_shadow;
     }
 
diff -r 8943a9696358 -r 51a0c81a4030 xen/arch/x86/hvm/vmx/realmode.c
--- a/xen/arch/x86/hvm/vmx/realmode.c   Tue Oct 25 16:46:47 2011 +0100
+++ b/xen/arch/x86/hvm/vmx/realmode.c   Tue Oct 25 16:48:17 2011 +0100
@@ -172,6 +172,7 @@
     struct vcpu *curr = current;
     struct hvm_emulate_ctxt hvmemul_ctxt;
     struct segment_register *sreg;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     unsigned long intr_info;
     unsigned int emulations = 0;
 
@@ -182,7 +183,7 @@
 
     hvm_emulate_prepare(&hvmemul_ctxt, regs);
 
-    if ( curr->arch.hvm_vcpu.io_state == HVMIO_completed )
+    if ( vio->io_state == HVMIO_completed )
         realmode_emulate_one(&hvmemul_ctxt);
 
     /* Only deliver interrupts into emulated real mode. */
@@ -196,7 +197,7 @@
     curr->arch.hvm_vmx.vmx_emulate = 1;
     while ( curr->arch.hvm_vmx.vmx_emulate &&
             !softirq_pending(smp_processor_id()) &&
-            (curr->arch.hvm_vcpu.io_state == HVMIO_none) )
+            (vio->io_state == HVMIO_none) )
     {
         /*
          * Check for pending interrupts only every 16 instructions, because
@@ -221,7 +222,7 @@
     }
 
     /* Need to emulate next time if we've started an IO operation */
-    if ( curr->arch.hvm_vcpu.io_state != HVMIO_none )
+    if ( vio->io_state != HVMIO_none )
         curr->arch.hvm_vmx.vmx_emulate = 1;
 
     if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
diff -r 8943a9696358 -r 51a0c81a4030 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Tue Oct 25 16:46:47 2011 +0100
+++ b/xen/include/asm-x86/hvm/vcpu.h    Tue Oct 25 16:48:17 2011 +0100
@@ -44,6 +44,30 @@
     uint32_t asid;
 };
 
+struct hvm_vcpu_io {
+    /* I/O request in flight to device model. */
+    enum hvm_io_state   io_state;
+    unsigned long       io_data;
+    int                 io_size;
+
+    /*
+     * HVM emulation:
+     *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+     *  The latter is known to be an MMIO frame (not RAM).
+     *  This translation is only valid if @mmio_gva is non-zero.
+     */
+    unsigned long       mmio_gva;
+    unsigned long       mmio_gpfn;
+
+    /* We may read up to m128 as a number of device-model transactions. */
+    paddr_t mmio_large_read_pa;
+    uint8_t mmio_large_read[16];
+    unsigned int mmio_large_read_bytes;
+    /* We may write up to m128 as a number of device-model transactions. */
+    paddr_t mmio_large_write_pa;
+    unsigned int mmio_large_write_bytes;
+};
+
 #define VMCX_EADDR    (~0ULL)
 
 struct nestedvcpu {
@@ -135,31 +159,11 @@
     /* Which cache mode is this VCPU in (CR0:CD/NW)? */
     u8                  cache_mode;
 
-    /* I/O request in flight to device model. */
-    enum hvm_io_state   io_state;
-    unsigned long       io_data;
-    int                 io_size;
-
-    /*
-     * HVM emulation:
-     *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
-     *  The latter is known to be an MMIO frame (not RAM).
-     *  This translation is only valid if @mmio_gva is non-zero.
-     */
-    unsigned long       mmio_gva;
-    unsigned long       mmio_gpfn;
+    struct hvm_vcpu_io  hvm_io;
 
     /* Callback into x86_emulate when emulating FPU/MMX/XMM instructions. */
     void (*fpu_exception_callback)(void *, struct cpu_user_regs *);
     void *fpu_exception_callback_arg;
-    /* We may read up to m128 as a number of device-model transactions. */
-    paddr_t mmio_large_read_pa;
-    uint8_t mmio_large_read[16];
-    unsigned int mmio_large_read_bytes;
-    /* We may write up to m128 as a number of device-model transactions. */
-    paddr_t mmio_large_write_pa;
-    unsigned int mmio_large_write_bytes;
-
     /* Pending hw/sw interrupt */
     int           inject_trap;       /* -1 for nothing to inject */
     int           inject_error_code;

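For context, the mmio_large_* fields moved above implement a small cache
that lets an up-to-16-byte (m128) MMIO access span several device-model
transactions. The following standalone sketch mirrors the read-side logic
in hvmemul_do_io(); it is illustrative only, and the names
mmio_read_state, record_cycle and try_cached are invented here, not part
of Xen:

    #include <stdint.h>
    #include <string.h>

    struct mmio_read_state {
        uint64_t     pa;       /* guest-physical base of the large read */
        uint8_t      buf[16];  /* bytes collected so far (up to m128) */
        unsigned int bytes;    /* number of valid bytes in buf[] */
    };

    /* After a read cycle of @size bytes at @addr completes, extend the
     * cached window if the cycle is contiguous with it. */
    static void record_cycle(struct mmio_read_state *s, uint64_t addr,
                             const void *data, unsigned int size)
    {
        if ( s->bytes == 0 )
            s->pa = addr;                 /* first cycle fixes the base */
        if ( (addr == s->pa + s->bytes) &&
             (s->bytes + size < sizeof(s->buf)) )
        {
            memcpy(&s->buf[addr - s->pa], data, size);
            s->bytes += size;
        }
    }

    /* A later cycle wholly inside the window is served from the cache,
     * avoiding another round trip to the device model. */
    static int try_cached(const struct mmio_read_state *s, uint64_t addr,
                          void *data, unsigned int size)
    {
        if ( (addr >= s->pa) && (addr + size <= s->pa + s->bytes) )
        {
            memcpy(data, &s->buf[addr - s->pa], size);
            return 1;
        }
        return 0;
    }
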
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
