xen-changelog

[Xen-changelog] Merge firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-2.0-testing.bk

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Merge firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-2.0-testing.bk
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Mon, 16 May 2005 20:21:53 +0000
Delivery-date: Mon, 16 May 2005 21:03:35 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
ChangeSet 1.1423, 2005/05/16 21:21:53+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx

        Merge firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-2.0-testing.bk
        into firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk



 blkback.c |  381 ++++++++++++++++++++++++++++++++++++++++++--------------------
 1 files changed, 263 insertions(+), 118 deletions(-)


diff -Nru a/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c b/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c     2005-05-16 17:04:12 -04:00
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c     2005-05-16 17:04:12 -04:00
@@ -8,9 +8,14 @@
  *  arch/xen/drivers/blkif/frontend
  * 
  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Copyright (c) 2005, Christopher Clark
  */
 
 #include "common.h"
+#include <asm-xen/evtchn.h>
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+#include <asm-xen/xen-public/grant_table.h>
+#endif
 
 /*
  * These are rather arbitrary. They are fairly large because adjacent requests
@@ -25,13 +30,11 @@
 #define BATCH_PER_DOMAIN 16
 
 static unsigned long mmap_vstart;
-#define MMAP_PAGES_PER_REQUEST \
-    (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
-#define MMAP_PAGES             \
-    (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
-#define MMAP_VADDR(_req,_seg)                        \
-    (mmap_vstart +                                   \
-     ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
+#define MMAP_PAGES                                              \
+    (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
+#define MMAP_VADDR(_req,_seg)                                   \
+    (mmap_vstart +                                              \
+     ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) +    \
      ((_seg) * PAGE_SIZE))
 
 /*
@@ -81,6 +84,29 @@
 }
 #endif
 
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+/* When using grant tables to map a frame for device access then the
+ * handle returned must be used to unmap the frame. This is needed to
+ * drop the ref count on the frame.
+ */
+static u16 pending_grant_handles[MMAP_PAGES];
+#define pending_handle(_idx, _i) \
+    (pending_grant_handles[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
+#define BLKBACK_INVALID_HANDLE (0xFFFF)
+#endif
+
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
+/*
+ * If the tap driver is used, we may get pages belonging to either the tap
+ * or (more likely) the real frontend.  The backend must specify which domain
+ * a given page belongs to in update_va_mapping though.  For the moment, 
+ * the tap rewrites the ID field of the request to contain the request index
+ * and the id of the real front end domain.
+ */
+#define BLKTAP_COOKIE 0xbeadfeed
+static inline domid_t ID_TO_DOM(unsigned long id) { return (id >> 16); }
+#endif
+
 static int do_block_io_op(blkif_t *blkif, int max_to_do);
 static void dispatch_probe(blkif_t *blkif, blkif_request_t *req);
 static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
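
[Note on the blktap ID scheme above: only the decode side, ID_TO_DOM(), is
part of this patch; the matching encode step lives in the tap driver. Purely
for illustration, a hypothetical encode helper consistent with ID_TO_DOM()
would pack the fields like this:]

/* Hypothetical helper, for illustration only (the real packing is done
 * by the tap driver, not in this file): put the real frontend domain in
 * the upper 16 bits and the request index in the lower 16, so that
 * ID_TO_DOM() above recovers the domain with a 16-bit right shift. */
static inline unsigned long MAKE_ID(domid_t dom, unsigned long req_idx)
{
    return ((unsigned long)dom << 16) | (req_idx & 0xffff);
}
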
@@ -89,20 +115,42 @@
 
 static void fast_flush_area(int idx, int nr_pages)
 {
-    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+    gnttab_op_t       aop[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    unsigned int      i, invcount = 0;
+    u16               handle;
+
+    for ( i = 0; i < nr_pages; i++ )
+    {
+        if ( BLKBACK_INVALID_HANDLE != ( handle = pending_handle(idx, i) ) )
+        {
+            aop[i].u.unmap_grant_ref.host_virt_addr = MMAP_VADDR(idx, i);
+            aop[i].u.unmap_grant_ref.dev_bus_addr   = 0;
+            aop[i].u.unmap_grant_ref.handle         = handle;
+            pending_handle(idx, i) = BLKBACK_INVALID_HANDLE;
+            invcount++;
+        }
+    }
+    if ( unlikely(HYPERVISOR_grant_table_op(
+                    GNTTABOP_unmap_grant_ref, aop, invcount)))
+        BUG();
+#else
+
+    multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     int               i;
 
     for ( i = 0; i < nr_pages; i++ )
     {
         mcl[i].op = __HYPERVISOR_update_va_mapping;
-        mcl[i].args[0] = MMAP_VADDR(idx, i) >> PAGE_SHIFT;
+        mcl[i].args[0] = MMAP_VADDR(idx, i);
         mcl[i].args[1] = 0;
         mcl[i].args[2] = 0;
     }
 
-    mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
+    mcl[nr_pages-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
     if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
         BUG();
+#endif
 }
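
[Note: the grant path of fast_flush_area() relies on every slot of
pending_grant_handles[] starting out as BLKBACK_INVALID_HANDLE, so that
slots which were never mapped are skipped. The initialisation itself is not
visible in this excerpt; a minimal sketch of what the init path has to do:]

/* Sketch only: the driver's init path (not shown in this mail) must mark
 * every handle slot invalid before the first request is mapped, otherwise
 * fast_flush_area() would attempt to unmap stale garbage. */
static void init_pending_grant_handles(void)
{
    int i;
    for ( i = 0; i < MMAP_PAGES; i++ )
        pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
}
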
 
 
@@ -281,17 +329,16 @@
 
 static int do_block_io_op(blkif_t *blkif, int max_to_do)
 {
-    blkif_ring_t *blk_ring = blkif->blk_ring_base;
+    blkif_back_ring_t *blk_ring = &blkif->blk_ring;
     blkif_request_t *req;
-    BLKIF_RING_IDX i, rp;
+    RING_IDX i, rp;
     int more_to_do = 0;
 
-    rp = blk_ring->req_prod;
+    rp = blk_ring->sring->req_prod;
     rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-    /* Take items off the comms ring, taking care not to overflow. */
-    for ( i = blkif->blk_req_cons; 
-          (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
+    for ( i = blk_ring->req_cons; 
+         (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
           i++ )
     {
         if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
@@ -300,7 +347,7 @@
             break;
         }
         
-        req = &blk_ring->ring[MASK_BLKIF_IDX(i)].req;
+        req = RING_GET_REQUEST(blk_ring, i);
         switch ( req->operation )
         {
         case BLKIF_OP_READ:
@@ -314,14 +361,13 @@
 
         default:
             DPRINTK("error: unknown block io operation [%d]\n",
-                    blk_ring->ring[i].req.operation);
-            make_response(blkif, blk_ring->ring[i].req.id, 
-                          blk_ring->ring[i].req.operation, BLKIF_RSP_ERROR);
+                    req->operation);
+            make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
             break;
         }
     }
 
-    blkif->blk_req_cons = i;
+    blk_ring->req_cons = i;
     return more_to_do;
 }
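
[Note: RING_REQUEST_CONS_OVERFLOW() takes over the job of the old
hand-rolled test (i - blkif->blk_resp_prod) != BLKIF_RING_SIZE; it stops
the consumer from running more than one ring-full ahead of the responses it
has produced. In the generic ring headers of this era it expands to roughly:]

/* Approximate expansion, shown for illustration (see the generic ring
 * macros in the shared io/ring.h header): the consumer index may not get
 * a full ring ahead of the privately-held response producer index. */
#define RING_REQUEST_CONS_OVERFLOW(_r, _idx) \
    (((_idx) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
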
 
@@ -339,12 +385,50 @@
          (blkif_last_sect(req->frame_and_sects[0]) != 7) )
         goto out;
 
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+    {
+        gnttab_op_t     op;
+
+        op.u.map_grant_ref.host_virt_addr = MMAP_VADDR(pending_idx, 0);
+        op.u.map_grant_ref.flags = GNTMAP_host_map;
+        op.u.map_grant_ref.ref = blkif_gref_from_fas(req->frame_and_sects[0]);
+        op.u.map_grant_ref.dom = blkif->domid;
+
+        if ( unlikely(HYPERVISOR_grant_table_op(
+                        GNTTABOP_map_grant_ref, &op, 1)))
+            BUG();
+
+        if ( op.u.map_grant_ref.handle < 0 )
+            goto out;
+
+        pending_handle(pending_idx, 0) = op.u.map_grant_ref.handle;
+    }
+#else /* else CONFIG_XEN_BLKDEV_GRANT */
+
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
+    /* Grab the real frontend out of the probe message. */
+    if (req->frame_and_sects[1] == BLKTAP_COOKIE) 
+        blkif->is_blktap = 1;
+#endif
+
+
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
     if ( HYPERVISOR_update_va_mapping_otherdomain(
-        MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
+        MMAP_VADDR(pending_idx, 0),
         (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
-        0, blkif->domid) )
+        0, (blkif->is_blktap ? ID_TO_DOM(req->id) : blkif->domid) ) )
+        
         goto out;
-
+#else
+    if ( HYPERVISOR_update_va_mapping_otherdomain(
+        MMAP_VADDR(pending_idx, 0),
+        (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
+        0, blkif->domid) ) 
+        
+        goto out;
+#endif
+#endif /* endif CONFIG_XEN_BLKDEV_GRANT */
+   
     rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0), 
                     PAGE_SIZE / sizeof(vdisk_t));
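
[Note: taken together with fast_flush_area(), the grant path above follows a
simple map/record/use/unmap lifecycle. A condensed sketch, with a
hypothetical wrapper name and error handling trimmed; all field and helper
names are as they appear in the patch:]

/* Sketch of the grant lifecycle used by the probe path: map one frontend
 * frame, record the handle, use the page, then let fast_flush_area()
 * unmap it via the recorded handle. */
static int probe_page_lifecycle(blkif_t *blkif, blkif_request_t *req,
                                int pending_idx)
{
    gnttab_op_t op;

    op.u.map_grant_ref.host_virt_addr = MMAP_VADDR(pending_idx, 0);
    op.u.map_grant_ref.flags = GNTMAP_host_map;
    op.u.map_grant_ref.ref   = blkif_gref_from_fas(req->frame_and_sects[0]);
    op.u.map_grant_ref.dom   = blkif->domid;

    if ( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ||
         (op.u.map_grant_ref.handle < 0) )
        return -1;                       /* map failed */

    pending_handle(pending_idx, 0) = op.u.map_grant_ref.handle;

    /* ... read the page through MMAP_VADDR(pending_idx, 0) ... */

    fast_flush_area(pending_idx, 1);     /* unmap via the recorded handle */
    return 0;
}
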
 
@@ -357,113 +441,152 @@
 {
     extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]); 
     int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
-    short nr_sects;
-    unsigned long buffer, fas;
-    int i, tot_sects, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+    unsigned long fas = 0;
+    int i, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
     pending_req_t *pending_req;
-    unsigned long  remap_prot;
-    multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
-
-    /* We map virtual scatter/gather segments to physical segments. */
-    int new_segs, nr_psegs = 0;
-    phys_seg_t phys_seg[BLKIF_MAX_SEGMENTS_PER_REQUEST + 1];
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+    gnttab_op_t       aop[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+#else
+    unsigned long remap_prot;
+    multicall_entry_t mcl[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+#endif
+    struct phys_req preq;
+    struct { 
+        unsigned long buf; unsigned int nsec;
+    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    unsigned int nseg;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+    struct buffer_head *bh;
+#else
+    struct bio *bio = NULL, *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    int nbio = 0;
+    request_queue_t *q;

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog