WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [xen-unstable] x86_emulate: Clean up HVM emulated I/O ha

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] x86_emulate: Clean up HVM emulated I/O handling.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 27 Mar 2008 05:30:27 -0700
Delivery-date: Thu, 27 Mar 2008 05:30:51 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1206558057 0
# Node ID 2e84414ea14a384c3b3d4da0125d30dea9f62de5
# Parent  89121c8b3c0d0713131d0494f1f76155e4f480de
x86_emulate: Clean up HVM emulated I/O handling.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c      |  182 +++++++++++++++++++---------------------
 xen/arch/x86/hvm/io.c           |   93 +++-----------------
 xen/arch/x86/hvm/vmx/realmode.c |    4 
 xen/include/asm-x86/hvm/io.h    |    5 -
 xen/include/asm-x86/hvm/vcpu.h  |   12 +-
 5 files changed, 115 insertions(+), 181 deletions(-)

diff -r 89121c8b3c0d -r 2e84414ea14a xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Wed Mar 26 15:50:45 2008 +0000
+++ b/xen/arch/x86/hvm/emulate.c        Wed Mar 26 19:00:57 2008 +0000
@@ -18,6 +18,77 @@
 #include <asm/hvm/emulate.h>
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
+
+static int hvmemul_do_io(
+    int is_mmio, paddr_t addr, unsigned long count, int size,
+    paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
+{
+    struct vcpu *curr = current;
+    vcpu_iodata_t *vio = get_ioreq(curr);
+    ioreq_t *p = &vio->vp_ioreq;
+
+    switch ( curr->arch.hvm_vcpu.io_state )
+    {
+    case HVMIO_none:
+        break;
+    case HVMIO_completed:
+        curr->arch.hvm_vcpu.io_state = HVMIO_none;
+        if ( val == NULL )
+            return X86EMUL_UNHANDLEABLE;
+        *val = curr->arch.hvm_vcpu.io_data;
+        return X86EMUL_OKAY;
+    default:
+        return X86EMUL_UNHANDLEABLE;
+    }
+
+    curr->arch.hvm_vcpu.io_state =
+        (val == NULL) ? HVMIO_dispatched : HVMIO_awaiting_completion;
+
+    if ( p->state != STATE_IOREQ_NONE )
+        gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
+                 p->state);
+
+    p->dir = dir;
+    p->data_is_ptr = value_is_ptr;
+    p->type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO;
+    p->size = size;
+    p->addr = addr;
+    p->count = count;
+    p->df = df;
+    p->data = value;
+    p->io_count++;
+
+    if ( is_mmio
+         ? (hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p))
+         : hvm_portio_intercept(p) )
+    {
+        p->state = STATE_IORESP_READY;
+        hvm_io_assist();
+        if ( val != NULL )
+            *val = curr->arch.hvm_vcpu.io_data;
+        curr->arch.hvm_vcpu.io_state = HVMIO_none;
+        return X86EMUL_OKAY;
+    }
+
+    hvm_send_assist_req(curr);
+    return (val != NULL) ? X86EMUL_RETRY : X86EMUL_OKAY;
+}
+
+static int hvmemul_do_pio(
+    unsigned long port, unsigned long count, int size,
+    paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
+{
+    return hvmemul_do_io(0, port, count, size, value,
+                         dir, df, value_is_ptr, val);
+}
+
+static int hvmemul_do_mmio(
+    paddr_t gpa, unsigned long count, int size,
+    paddr_t value, int dir, int df, int value_is_ptr, unsigned long *val)
+{
+    return hvmemul_do_io(1, gpa, count, size, value,
+                         dir, df, value_is_ptr, val);
+}
 
 /*
  * Convert addr from linear to physical form, valid over the range
@@ -161,7 +232,6 @@ static int __hvmemul_read(
 
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
     {
-        struct vcpu *curr = current;
         unsigned long reps = 1;
         paddr_t gpa;
 
@@ -173,21 +243,7 @@ static int __hvmemul_read(
         if ( rc != X86EMUL_OKAY )
             return rc;
 
-        if ( curr->arch.hvm_vcpu.io_in_progress )
-            return X86EMUL_UNHANDLEABLE;
-
-        if ( !curr->arch.hvm_vcpu.io_completed )
-        {
-            curr->arch.hvm_vcpu.io_in_progress = 1;
-            send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, bytes,
-                          0, IOREQ_READ, 0, 0);
-        }
-
-        if ( !curr->arch.hvm_vcpu.io_completed )
-            return X86EMUL_RETRY;
-
-        *val = curr->arch.hvm_vcpu.io_data;
-        curr->arch.hvm_vcpu.io_completed = 0;
+        return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val);
     }
 
     return X86EMUL_OKAY;
@@ -251,7 +307,6 @@ static int hvmemul_write(
 
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
     {
-        struct vcpu *curr = current;
         unsigned long reps = 1;
         paddr_t gpa;
 
@@ -260,12 +315,7 @@ static int hvmemul_write(
         if ( rc != X86EMUL_OKAY )
             return rc;
 
-        if ( curr->arch.hvm_vcpu.io_in_progress )
-            return X86EMUL_UNHANDLEABLE;
-
-        curr->arch.hvm_vcpu.io_in_progress = 1;
-        send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, bytes,
-                      val, IOREQ_WRITE, 0, 0);
+        return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE, 0, 0, NULL);
     }
 
     return X86EMUL_OKAY;
@@ -293,7 +343,6 @@ static int hvmemul_rep_ins(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    struct vcpu *curr = current;
     unsigned long addr;
     paddr_t gpa;
     int rc;
@@ -309,14 +358,8 @@ static int hvmemul_rep_ins(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( curr->arch.hvm_vcpu.io_in_progress )
-        return X86EMUL_UNHANDLEABLE;
-
-    curr->arch.hvm_vcpu.io_in_progress = 1;
-    send_pio_req(src_port, *reps, bytes_per_rep, gpa, IOREQ_READ,
-                 !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
-
-    return X86EMUL_OKAY;
+    return hvmemul_do_pio(src_port, *reps, bytes_per_rep, gpa, IOREQ_READ,
+                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
 }
 
 static int hvmemul_rep_outs(
@@ -329,7 +372,6 @@ static int hvmemul_rep_outs(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    struct vcpu *curr = current;
     unsigned long addr;
     paddr_t gpa;
     int rc;
@@ -345,15 +387,8 @@ static int hvmemul_rep_outs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( curr->arch.hvm_vcpu.io_in_progress )
-        return X86EMUL_UNHANDLEABLE;
-
-    curr->arch.hvm_vcpu.io_in_progress = 1;
-    send_pio_req(dst_port, *reps, bytes_per_rep,
-                 gpa, IOREQ_WRITE,
-                 !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
-
-    return X86EMUL_OKAY;
+    return hvmemul_do_pio(dst_port, *reps, bytes_per_rep, gpa, IOREQ_WRITE,
+                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
 }
 
 static int hvmemul_rep_movs(
@@ -367,7 +402,6 @@ static int hvmemul_rep_movs(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    struct vcpu *curr = current;
     unsigned long saddr, daddr;
     paddr_t sgpa, dgpa;
     p2m_type_t p2mt;
@@ -395,29 +429,18 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( curr->arch.hvm_vcpu.io_in_progress )
-        return X86EMUL_UNHANDLEABLE;
-
     (void)gfn_to_mfn_current(sgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) )
-    {
-        curr->arch.hvm_vcpu.io_in_progress = 1;
-        send_mmio_req(IOREQ_TYPE_COPY, sgpa, *reps, bytes_per_rep,
-                      dgpa, IOREQ_READ,
-                      !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
-    }
-    else
-    {
-        (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
-        if ( p2m_is_ram(p2mt) )
-            return X86EMUL_UNHANDLEABLE;
-        curr->arch.hvm_vcpu.io_in_progress = 1;
-        send_mmio_req(IOREQ_TYPE_COPY, dgpa, *reps, bytes_per_rep,
-                      sgpa, IOREQ_WRITE,
-                      !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
-    }
-
-    return X86EMUL_OKAY;
+        return hvmemul_do_mmio(
+            sgpa, *reps, bytes_per_rep, dgpa, IOREQ_READ,
+            !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
+
+    (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
+    if ( p2m_is_ram(p2mt) )
+        return X86EMUL_UNHANDLEABLE;
+    return hvmemul_do_mmio(
+        dgpa, *reps, bytes_per_rep, sgpa, IOREQ_WRITE,
+        !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1, NULL);
 }
 
 static int hvmemul_read_segment(
@@ -453,24 +476,7 @@ static int hvmemul_read_io(
     unsigned long *val,
     struct x86_emulate_ctxt *ctxt)
 {
-    struct vcpu *curr = current;
-
-    if ( curr->arch.hvm_vcpu.io_in_progress )
-        return X86EMUL_UNHANDLEABLE;
-
-    if ( !curr->arch.hvm_vcpu.io_completed )
-    {
-        curr->arch.hvm_vcpu.io_in_progress = 1;
-        send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
-    }
-
-    if ( !curr->arch.hvm_vcpu.io_completed )
-        return X86EMUL_RETRY;
-
-    *val = curr->arch.hvm_vcpu.io_data;
-    curr->arch.hvm_vcpu.io_completed = 0;
-
-    return X86EMUL_OKAY;
+    return hvmemul_do_pio(port, 1, bytes, 0, IOREQ_READ, 0, 0, val);
 }
 
 static int hvmemul_write_io(
@@ -479,21 +485,13 @@ static int hvmemul_write_io(
     unsigned long val,
     struct x86_emulate_ctxt *ctxt)
 {
-    struct vcpu *curr = current;
-
     if ( port == 0xe9 )
     {
-        hvm_print_line(curr, val);
+        hvm_print_line(current, val);
         return X86EMUL_OKAY;
     }
 
-    if ( curr->arch.hvm_vcpu.io_in_progress )
-        return X86EMUL_UNHANDLEABLE;
-
-    curr->arch.hvm_vcpu.io_in_progress = 1;
-    send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0);
-
-    return X86EMUL_OKAY;
+    return hvmemul_do_pio(port, 1, bytes, val, IOREQ_WRITE, 0, 0, NULL);
 }
 
 static int hvmemul_read_cr(
diff -r 89121c8b3c0d -r 2e84414ea14a xen/arch/x86/hvm/io.c
--- a/xen/arch/x86/hvm/io.c     Wed Mar 26 15:50:45 2008 +0000
+++ b/xen/arch/x86/hvm/io.c     Wed Mar 26 19:00:57 2008 +0000
@@ -123,73 +123,6 @@ int hvm_buffered_io_send(ioreq_t *p)
     return 1;
 }
 
-void send_pio_req(unsigned long port, unsigned long count, int size,
-                  paddr_t value, int dir, int df, int value_is_ptr)
-{
-    struct vcpu *v = current;
-    vcpu_iodata_t *vio = get_ioreq(v);
-    ioreq_t *p = &vio->vp_ioreq;
-
-    if ( p->state != STATE_IOREQ_NONE )
-        gdprintk(XENLOG_WARNING,
-                 "WARNING: send pio with something already pending (%d)?\n",
-                 p->state);
-
-    p->dir = dir;
-    p->data_is_ptr = value_is_ptr;
-    p->type = IOREQ_TYPE_PIO;
-    p->size = size;
-    p->addr = port;
-    p->count = count;
-    p->df = df;
-    p->data = value;
-    p->io_count++;
-
-    if ( hvm_portio_intercept(p) )
-    {
-        p->state = STATE_IORESP_READY;
-        hvm_io_assist();
-    }
-    else
-    {
-        hvm_send_assist_req(v);
-    }
-}
-
-void send_mmio_req(unsigned char type, paddr_t gpa,
-                   unsigned long count, int size, paddr_t value,
-                   int dir, int df, int value_is_ptr)
-{
-    struct vcpu *v = current;
-    vcpu_iodata_t *vio = get_ioreq(v);
-    ioreq_t *p = &vio->vp_ioreq;
-
-    if ( p->state != STATE_IOREQ_NONE )
-        gdprintk(XENLOG_WARNING,
-                 "WARNING: send mmio with something already pending (%d)?\n",
-                 p->state);
-
-    p->dir = dir;
-    p->data_is_ptr = value_is_ptr;
-    p->type = type;
-    p->size = size;
-    p->addr = gpa;
-    p->count = count;
-    p->df = df;
-    p->data = value;
-    p->io_count++;
-
-    if ( hvm_mmio_intercept(p) || hvm_buffered_io_intercept(p) )
-    {
-        p->state = STATE_IORESP_READY;
-        hvm_io_assist();
-    }
-    else
-    {
-        hvm_send_assist_req(v);
-    }
-}
-
 void send_timeoffset_req(unsigned long timeoff)
 {
     ioreq_t p[1];
@@ -248,6 +181,9 @@ int handle_mmio(void)
     hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
 
     rc = hvm_emulate_one(&ctxt);
+
+    if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion )
+       curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
 
     switch ( rc )
     {
@@ -271,8 +207,6 @@ int handle_mmio(void)
 
     hvm_emulate_writeback(&ctxt);
 
-    curr->arch.hvm_vcpu.mmio_in_progress = curr->arch.hvm_vcpu.io_in_progress;
-
     return 1;
 }
 
@@ -280,6 +214,7 @@ void hvm_io_assist(void)
 {
     struct vcpu *v = current;
     ioreq_t *p = &get_ioreq(v)->vp_ioreq;
+    enum hvm_io_state io_state;
 
     if ( p->state != STATE_IORESP_READY )
     {
@@ -292,16 +227,16 @@ void hvm_io_assist(void)
 
     p->state = STATE_IOREQ_NONE;
 
-    if ( v->arch.hvm_vcpu.io_in_progress )
-    {
-        v->arch.hvm_vcpu.io_in_progress = 0;
-        if ( (p->dir == IOREQ_READ) && !p->data_is_ptr )
-        {
-            v->arch.hvm_vcpu.io_completed = 1;
-            v->arch.hvm_vcpu.io_data = p->data;
-            if ( v->arch.hvm_vcpu.mmio_in_progress )
-                (void)handle_mmio();
-        }
+    io_state = v->arch.hvm_vcpu.io_state;
+    v->arch.hvm_vcpu.io_state = HVMIO_none;
+
+    if ( (io_state == HVMIO_awaiting_completion) ||
+         (io_state == HVMIO_handle_mmio_awaiting_completion) )
+    {
+        v->arch.hvm_vcpu.io_state = HVMIO_completed;
+        v->arch.hvm_vcpu.io_data = p->data;
+        if ( io_state == HVMIO_handle_mmio_awaiting_completion )
+            (void)handle_mmio();
     }
 
  out:
diff -r 89121c8b3c0d -r 2e84414ea14a xen/arch/x86/hvm/vmx/realmode.c
--- a/xen/arch/x86/hvm/vmx/realmode.c   Wed Mar 26 15:50:45 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/realmode.c   Wed Mar 26 19:00:57 2008 +0000
@@ -190,7 +190,7 @@ void vmx_realmode(struct cpu_user_regs *
 
     hvm_emulate_prepare(&hvmemul_ctxt, regs);
 
-    if ( curr->arch.hvm_vcpu.io_completed )
+    if ( curr->arch.hvm_vcpu.io_state == HVMIO_completed )
         realmode_emulate_one(&hvmemul_ctxt);
 
     /* Only deliver interrupts into emulated real mode. */
@@ -203,7 +203,7 @@ void vmx_realmode(struct cpu_user_regs *
 
     while ( curr->arch.hvm_vmx.vmxemul &&
             !softirq_pending(smp_processor_id()) &&
-            !curr->arch.hvm_vcpu.io_in_progress )
+            (curr->arch.hvm_vcpu.io_state == HVMIO_none) )
     {
         /*
          * Check for pending interrupts only every 16 instructions, because
diff -r 89121c8b3c0d -r 2e84414ea14a xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h      Wed Mar 26 15:50:45 2008 +0000
+++ b/xen/include/asm-x86/hvm/io.h      Wed Mar 26 19:00:57 2008 +0000
@@ -96,11 +96,6 @@ static inline int register_buffered_io_h
     return register_io_handler(d, addr, size, action, HVM_BUFFERED_IO);
 }
 
-void send_mmio_req(unsigned char type, paddr_t gpa,
-                   unsigned long count, int size, paddr_t value,
-                   int dir, int df, int value_is_ptr);
-void send_pio_req(unsigned long port, unsigned long count, int size,
-                  paddr_t value, int dir, int df, int value_is_ptr);
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
 int handle_mmio(void);
diff -r 89121c8b3c0d -r 2e84414ea14a xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h    Wed Mar 26 15:50:45 2008 +0000
+++ b/xen/include/asm-x86/hvm/vcpu.h    Wed Mar 26 19:00:57 2008 +0000
@@ -28,6 +28,14 @@
 
 #define HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM          0
 #define HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI     1
+
+enum hvm_io_state {
+    HVMIO_none = 0,
+    HVMIO_dispatched,
+    HVMIO_awaiting_completion,
+    HVMIO_handle_mmio_awaiting_completion,
+    HVMIO_completed
+};
 
 struct hvm_vcpu {
     /* Guest control-register and EFER values, just as the guest sees them. */
@@ -70,9 +78,7 @@ struct hvm_vcpu {
     u8                  cache_mode;
 
     /* I/O request in flight to device model. */
-    bool_t              mmio_in_progress;
-    bool_t              io_in_progress;
-    bool_t              io_completed;
+    enum hvm_io_state   io_state;
     unsigned long       io_data;
 };
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-changelog] [xen-unstable] x86_emulate: Clean up HVM emulated I/O handling., Xen patchbot-unstable <=