[Xen-devel] [PATCH] Fix stdvga performance for 32bit ops

To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] Fix stdvga performance for 32bit ops
From: Ben Guthro <bguthro@xxxxxxxxxxxxxxx>
Date: Wed, 31 Oct 2007 16:28:51 -0400
Cc: Robert Phillips <rphillips@xxxxxxxxxxxxxxx>
Delivery-date: Wed, 31 Oct 2007 13:29:31 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Thunderbird 2.0.0.5 (X11/20070719)

Corrected a bug in the stdvga code where it did not
properly handle 32-bit operations.
The buf_ioreq_t structure can now store 32 bits of data.
Because this increases its size to 8 bytes,
only 510 elements fit in the buffered_iopage
(down from 672).

Signed-off-by: Robert Phillips <rphillips@xxxxxxxxxxxxxxx>
Signed-off-by: Ben Guthro <bguthro@xxxxxxxxxxxxxxx>
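
For reference, a minimal sketch of the revised record and the size
assumption behind the 510-slot figure. This is not part of the patch: the
struct name and the compile-time check are illustrative, and the 8-byte
result assumes a typical GCC bit-field layout on x86.

#include <stdint.h>

/* Sketch of the revised record: 'data' widens from 16 to 32 bits and
 * 'addr' again carries only the 20-bit physical address. */
struct buf_ioreq_sketch {
    uint8_t  type;     /* I/O type */
    uint8_t  dir:1;    /* 1=read, 0=write */
    uint8_t  size:2;   /* 0=>1, 1=>2, 2=>4, 3=>8 bytes */
    uint32_t addr:20;  /* physical address */
    uint32_t data;     /* data (low 32 bits of an 8-byte request) */
};

/* Each slot is now 8 bytes, so two 4-byte ring pointers plus 510 slots
 * (8 + 510 * 8 = 4088 bytes) still fit within one 4096-byte page. */
typedef char slot_is_8_bytes[sizeof(struct buf_ioreq_sketch) == 8 ? 1 : -1];
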
diff -r 15c6e8698fda tools/ioemu/target-i386-dm/helper2.c
--- a/tools/ioemu/target-i386-dm/helper2.c      Wed Oct 31 15:07:54 2007 -0400
+++ b/tools/ioemu/target-i386-dm/helper2.c      Wed Oct 31 15:08:02 2007 -0400
@@ -554,20 +554,17 @@ void __handle_buffered_iopage(CPUState *
                                       IOREQ_BUFFER_SLOT_NUM];
         req.size = 1UL << buf_req->size;
         req.count = 1;
+        req.addr = buf_req->addr;
         req.data = buf_req->data;
         req.state = STATE_IOREQ_READY;
         req.dir  = buf_req->dir;
         req.type = buf_req->type;
         qw = req.size == 8;
         if (qw) {
-            req.data |= ((uint64_t)buf_req->addr) << 16;
             buf_req = &buffered_io_page->buf_ioreq[(buffered_io_page->read_pointer+1) %
                                                IOREQ_BUFFER_SLOT_NUM];
             req.data |= ((uint64_t)buf_req->data) << 32;
-            req.data |= ((uint64_t)buf_req->addr) << 48;
-        }
-        else
-            req.addr = buf_req->addr;
+        }
 
         __handle_ioreq(env, &req);
 
diff -r 15c6e8698fda xen/arch/x86/hvm/intercept.c
--- a/xen/arch/x86/hvm/intercept.c      Wed Oct 31 15:07:54 2007 -0400
+++ b/xen/arch/x86/hvm/intercept.c      Wed Oct 31 15:08:02 2007 -0400
@@ -163,8 +163,11 @@ int hvm_buffered_io_send(ioreq_t *p)
     BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);
 
     /* Return 0 for the cases we can't deal with. */
-    if (p->addr > 0xffffful || p->data_is_ptr || p->df || p->count != 1)
-        return 0;
+    if (p->addr > 0xffffful || p->data_is_ptr || p->df || p->count != 1) {
+        gdprintk(XENLOG_DEBUG, "slow ioreq.  type:%d size:%ld addr:0x%08lx dir:%d ptr:%d df:%d count:%ld\n",
+                 p->type, p->size, p->addr, !!p->dir, !!p->data_is_ptr, !!p->df, p->count);
+        return 0;
+    }
 
     bp.type = p->type;
     bp.dir  = p->dir;
@@ -181,7 +184,6 @@ int hvm_buffered_io_send(ioreq_t *p)
     case 8:
         bp.size = 3;
         qw = 1;
-        gdprintk(XENLOG_INFO, "quadword ioreq type:%d data:%ld\n", p->type, p->data);
         break;
     default:
         gdprintk(XENLOG_WARNING, "unexpected ioreq size:%ld\n", p->size);
@@ -189,7 +191,7 @@ int hvm_buffered_io_send(ioreq_t *p)
     }
     
     bp.data = p->data;
-    bp.addr = qw ? ((p->data >> 16) & 0xfffful) : (p->addr & 0xffffful);
+    bp.addr = p->addr;
     
     spin_lock(&iorp->lock);
 
@@ -205,7 +207,6 @@ int hvm_buffered_io_send(ioreq_t *p)
     
     if (qw) {
         bp.data = p->data >> 32;
-        bp.addr = (p->data >> 48) & 0xfffful;
         memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM],
                &bp, sizeof(bp));
     }
diff -r 15c6e8698fda xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c Wed Oct 31 15:07:54 2007 -0400
+++ b/xen/arch/x86/hvm/stdvga.c Wed Oct 31 15:08:02 2007 -0400
@@ -273,9 +273,12 @@ int stdvga_intercept_pio(ioreq_t *p)
     }
 
     spin_lock(&s->lock);
+
     if ( p->dir == IOREQ_READ ) {
         if (p->size != 1)
             gdprintk(XENLOG_WARNING, "unexpected io size:%d\n", (int)p->size);
+        if (p->data_is_ptr)
+            gdprintk(XENLOG_WARNING, "unexpected data_is_ptr\n");
         if (!(p->addr == 0x3c5 && s->sr_index >= sizeof(sr_mask)) &&
             !(p->addr == 0x3cf && s->gr_index >= sizeof(gr_mask)))
         {
@@ -591,6 +594,9 @@ int stdvga_intercept_mmio(ioreq_t *p)
             s->cache = 0;
         }
     }
+    else
+        buf = p->dir == IOREQ_WRITE;
+    
     if (buf && hvm_buffered_io_send(p)) {
         UPDATE_STATS(p->dir == IOREQ_READ ? s->stats.nr_mmio_buffered_rd++ : s->stats.nr_mmio_buffered_wr++);
         spin_unlock(&s->lock);
diff -r 15c6e8698fda xen/include/public/hvm/ioreq.h
--- a/xen/include/public/hvm/ioreq.h    Wed Oct 31 15:07:54 2007 -0400
+++ b/xen/include/public/hvm/ioreq.h    Wed Oct 31 15:07:57 2007 -0400
@@ -82,13 +82,13 @@ struct buf_ioreq {
 struct buf_ioreq {
     uint8_t  type;   /*  I/O type                    */
     uint8_t  dir:1;  /*  1=read, 0=write             */
-    uint8_t  size:2; /*  0=>1, 1=>2, 3=>8. If 8 then use two contig buf_ioreqs */
-    uint32_t addr:20; /*  physical address or high-order data */
-    uint16_t data;   /*  (low order) data            */
+    uint8_t  size:2; /*  0=>1, 1=>2, 2=>4, 3=>8. If 8 then use two contig buf_ioreqs */
+    uint32_t addr:20;/*  physical address            */
+    uint32_t data;   /*  data                        */
 };
 typedef struct buf_ioreq buf_ioreq_t;
 
-#define IOREQ_BUFFER_SLOT_NUM     672
+#define IOREQ_BUFFER_SLOT_NUM     510
 struct buffered_iopage {
     volatile unsigned int read_pointer;
     volatile unsigned int write_pointer;
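
As a rough standalone illustration of what the intercept.c and helper2.c
hunks above now do: an 8-byte request is split across two consecutive slots
on the Xen side and reassembled from them in qemu-dm, with the address
carried only in the first slot rather than packed into the addr fields.
The names and the simplified ring handling below are mine, not the patch's.

#include <stdint.h>

/* Illustrative slot type mirroring the revised public header. */
typedef struct {
    uint8_t  type;
    uint8_t  dir:1;
    uint8_t  size:2;   /* 0=>1, 1=>2, 2=>4, 3=>8 */
    uint32_t addr:20;
    uint32_t data;
} slot_t;

/* Producer (cf. hvm_buffered_io_send): an 8-byte request occupies two
 * consecutive slots; only the first carries the address, the second
 * carries the high 32 bits of data.  The caller has already checked
 * that addr fits in 20 bits (addr <= 0xfffff). */
static void enqueue_qword(slot_t *ring, unsigned int wp, unsigned int nslots,
                          uint8_t type, uint32_t addr, uint64_t data)
{
    slot_t lo = { .type = type, .dir = 0, .size = 3,
                  .addr = addr, .data = (uint32_t)data };
    slot_t hi = lo;

    hi.data = (uint32_t)(data >> 32);
    ring[wp % nslots] = lo;
    ring[(wp + 1) % nslots] = hi;
}

/* Consumer (cf. __handle_buffered_iopage): rebuild the 64-bit value from
 * the two slots; the address comes from the first slot alone. */
static uint64_t dequeue_qword(const slot_t *ring, unsigned int rp,
                              unsigned int nslots, uint32_t *addr)
{
    uint64_t data = ring[rp % nslots].data;

    data |= (uint64_t)ring[(rp + 1) % nslots].data << 32;
    *addr = ring[rp % nslots].addr;
    return data;
}
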
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel