[Xen-devel] [PATCH 1/2] Add multi-page ring support to xen-blkback.

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 1/2] Add multi-page ring support to xen-blkback.
From: Paul Durrant <paul.durrant@xxxxxxxxxx>
Date: Tue, 12 Jan 2010 17:44:56 +0000
Cc: Paul Durrant <paul.durrant@xxxxxxxxxx>
In-reply-to: <1263318297-18527-1-git-send-email-paul.durrant@xxxxxxxxxx>
References: <1263318297-18527-1-git-send-email-paul.durrant@xxxxxxxxxx>
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
 drivers/xen/blkback/common.h    |   10 +++-
 drivers/xen/blkback/interface.c |  113 +++++++++++++++++++++++++++-----------
 drivers/xen/blkback/xenbus.c    |  100 ++++++++++++++++++++++++++++++-----
 3 files changed, 174 insertions(+), 49 deletions(-)
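
For reference, the frontend side of the handshake that connect_ring() below expects looks roughly as follows. This is an illustrative sketch only, not part of the patch: the xenstore keys (ring-page-order, ring-ref%u) and the max-ring-page-order limit written by blkback_probe() are taken from the code below, while the helper name write_ring_refs(), its arguments and the transaction handling are assumptions made for the example.

#include <linux/kernel.h>
#include <xen/xenbus.h>
#include <xen/interface/grant_table.h>

/*
 * Illustrative sketch: frontend side of the multi-page handshake.
 * Assumed to run inside the frontend's connection transaction, with the
 * ring pages already granted to the backend domain.
 */
static int write_ring_refs(struct xenbus_device *dev,
                           struct xenbus_transaction xbt,
                           grant_ref_t ring_ref[], unsigned int ring_order)
{
        unsigned int i;
        int err;

        /* ring_order must not exceed what the backend advertised in
         * max-ring-page-order. */
        err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
                            ring_order);
        if (err)
                return err;

        /* One grant reference per ring page: ring-ref0, ring-ref1, ... */
        for (i = 0; i < (1u << ring_order); i++) {
                char key[16];

                snprintf(key, sizeof(key), "ring-ref%u", i);
                err = xenbus_printf(xbt, dev->nodename, key, "%u",
                                    ring_ref[i]);
                if (err)
                        return err;
        }

        return 0;
}

A frontend that does not write ring-page-order keeps the old single-page handshake and writes only the existing "ring-ref" key, which connect_ring() still accepts.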

diff --git a/drivers/xen/blkback/common.h b/drivers/xen/blkback/common.h
index d12d4d5..6404a4a 100644
--- a/drivers/xen/blkback/common.h
+++ b/drivers/xen/blkback/common.h
@@ -58,6 +58,9 @@ struct vbd {
 
 struct backend_info;
 
+#define        BLKIF_MAX_RING_PAGE_ORDER       2
+#define        BLKIF_MAX_RING_PAGES            (1<<BLKIF_MAX_RING_PAGE_ORDER)
+
 typedef struct blkif_st {
        /* Unique identifier for this interface. */
        domid_t           domid;
@@ -92,14 +95,15 @@ typedef struct blkif_st {
 
        wait_queue_head_t waiting_to_free;
 
-       grant_handle_t shmem_handle;
-       grant_ref_t    shmem_ref;
+       unsigned int    nr_shared_pages;
+       grant_handle_t  shmem_handle[BLKIF_MAX_RING_PAGES];
 } blkif_t;
 
 blkif_t *blkif_alloc(domid_t domid);
 void blkif_disconnect(blkif_t *blkif);
 void blkif_free(blkif_t *blkif);
-int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
+int blkif_map(blkif_t *blkif, unsigned long shared_pages[],
+             unsigned int nr_shared_pages, unsigned int evtchn);
 
 #define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
 #define blkif_put(_b)                                  \
diff --git a/drivers/xen/blkback/interface.c b/drivers/xen/blkback/interface.c
index e397a41..8951543 100644
--- a/drivers/xen/blkback/interface.c
+++ b/drivers/xen/blkback/interface.c
@@ -56,50 +56,93 @@ blkif_t *blkif_alloc(domid_t domid)
        return blkif;
 }
 
-static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
-{
-       struct gnttab_map_grant_ref op;
-
-       gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-                         GNTMAP_host_map, shared_page, blkif->domid);
+#define        INVALID_GRANT_HANDLE    ((grant_handle_t)~0U)
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
-               BUG();
-
-       if (op.status) {
-               DPRINTK(" Grant table operation failure !\n");
-               return op.status;
+static void unmap_frontend_pages(blkif_t *blkif)
+{
+       struct vm_struct *area = blkif->blk_ring_area;
+       struct gnttab_unmap_grant_ref op[BLKIF_MAX_RING_PAGES];
+       unsigned int i;
+       unsigned int j;
+
+       j = 0;
+       for (i = 0; i < blkif->nr_shared_pages; i++) {
+               unsigned long addr = (unsigned long)area->addr +
+                                    (i * PAGE_SIZE);
+
+               if (blkif->shmem_handle[i] != INVALID_GRANT_HANDLE) {
+                       gnttab_set_unmap_op(&op[j++], addr,
+                                           GNTMAP_host_map,
+                                           blkif->shmem_handle[i]);
+
+                       blkif->shmem_handle[i] = INVALID_GRANT_HANDLE;
+               }
        }
 
-       blkif->shmem_ref = shared_page;
-       blkif->shmem_handle = op.handle;
+       blkif->nr_shared_pages = 0;
 
-       return 0;
+       if (j != 0) {
+               if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+                                             op, j))
+                       BUG();
+       }
 }
 
-static void unmap_frontend_page(blkif_t *blkif)
+static int map_frontend_pages(blkif_t *blkif, unsigned long shared_pages[],
+                             unsigned int nr_shared_pages)
 {
-       struct gnttab_unmap_grant_ref op;
+       struct vm_struct *area = blkif->blk_ring_area;
+       struct gnttab_map_grant_ref op[BLKIF_MAX_RING_PAGES];
+       unsigned int i;
+       int status = 0;
 
-       gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
-                           GNTMAP_host_map, blkif->shmem_handle);
+       for (i = 0; i < nr_shared_pages; i++) {
+               unsigned long addr = (unsigned long)area->addr +
+                                    (i * PAGE_SIZE);
+
+               gnttab_set_map_op(&op[i], addr, GNTMAP_host_map,
+                                 shared_pages[i], blkif->domid);
+       }
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+       if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op,
+                                     nr_shared_pages))
                BUG();
+
+       for (i = 0; i < nr_shared_pages; i++) {
+               status = op[i].status;
+               if (status != 0) {
+                       blkif->shmem_handle[i] = INVALID_GRANT_HANDLE;
+                       continue;
+               }
+
+               blkif->shmem_handle[i] = op[i].handle;
+       }
+
+       blkif->nr_shared_pages = nr_shared_pages;
+
+       if (status != 0) {
+               DPRINTK(" Grant table operation failure !\n");
+               unmap_frontend_pages(blkif);
+       }
+
+       return status;
 }
 
-int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
+int blkif_map(blkif_t *blkif, unsigned long shared_pages[],
+             unsigned int nr_shared_pages, unsigned int evtchn)
 {
+       unsigned long size = nr_shared_pages * PAGE_SIZE;
        int err;
 
        /* Already connected through? */
        if (blkif->irq)
                return 0;
 
-       if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
+       blkif->blk_ring_area = alloc_vm_area(size);
+       if (blkif->blk_ring_area == NULL)
                return -ENOMEM;
 
-       err = map_frontend_page(blkif, shared_page);
+       err = map_frontend_pages(blkif, shared_pages, nr_shared_pages);
        if (err) {
                free_vm_area(blkif->blk_ring_area);
                return err;
@@ -110,21 +153,21 @@ int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
        {
                struct blkif_sring *sring;
                sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
-               BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+               BACK_RING_INIT(&blkif->blk_rings.native, sring, size);
                break;
        }
        case BLKIF_PROTOCOL_X86_32:
        {
-               struct blkif_x86_32_sring *sring_x86_32;
-               sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
-               BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+               struct blkif_x86_32_sring *sring;
+               sring = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_32, sring, size);
                break;
        }
        case BLKIF_PROTOCOL_X86_64:
        {
-               struct blkif_x86_64_sring *sring_x86_64;
-               sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
-               BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+               struct blkif_x86_64_sring *sring;
+               sring = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_64, sring, size);
                break;
        }
        default:
@@ -132,14 +175,17 @@ int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
        }
 
        err = bind_interdomain_evtchn_to_irqhandler(
-               blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
+               blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend",
+               blkif);
        if (err < 0)
        {
-               unmap_frontend_page(blkif);
+               unmap_frontend_pages(blkif);
                free_vm_area(blkif->blk_ring_area);
+               blkif->blk_ring_area = NULL;
                blkif->blk_rings.common.sring = NULL;
                return err;
        }
+
        blkif->irq = err;
 
        return 0;
@@ -162,8 +208,9 @@ void blkif_disconnect(blkif_t *blkif)
        }
 
        if (blkif->blk_rings.common.sring) {
-               unmap_frontend_page(blkif);
+               unmap_frontend_pages(blkif);
                free_vm_area(blkif->blk_ring_area);
+               blkif->blk_ring_area = NULL;
                blkif->blk_rings.common.sring = NULL;
        }
 }
diff --git a/drivers/xen/blkback/xenbus.c b/drivers/xen/blkback/xenbus.c
index 04c0a12..4ee10b8 100644
--- a/drivers/xen/blkback/xenbus.c
+++ b/drivers/xen/blkback/xenbus.c
@@ -42,6 +42,11 @@ static int connect_ring(struct backend_info *);
 static void backend_changed(struct xenbus_watch *, const char **,
                            unsigned int);
 
+/* Order of maximum shared ring size advertised to the front end. */
+static int blkif_max_ring_page_order;
+module_param_named(max_ring_page_order, blkif_max_ring_page_order, int, 0);
+MODULE_PARM_DESC(max_ring_page_order, "Order of maximum VM shared ring size");
+
 static int blkback_name(blkif_t *blkif, char *buf)
 {
        char *devpath, *devname;
@@ -243,6 +248,11 @@ static int blkback_probe(struct xenbus_device *dev,
        if (err)
                goto fail;
 
+       err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order",
+                           "%u", blkif_max_ring_page_order);
+       if (err)
+               goto fail;
+
        err = xenbus_switch_state(dev, XenbusStateInitWait);
        if (err)
                goto fail;
@@ -469,22 +479,82 @@ again:
 static int connect_ring(struct backend_info *be)
 {
        struct xenbus_device *dev = be->dev;
-       unsigned long ring_ref;
        unsigned int evtchn;
+       unsigned int ring_order;
+       unsigned long ring_ref[BLKIF_MAX_RING_PAGES];
        char protocol[64] = "";
        int err;
 
        DPRINTK("%s", dev->otherend);
 
-       err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref,
-                           "event-channel", "%u", &evtchn, NULL);
-       if (err) {
-               xenbus_dev_fatal(dev, err,
-                                "reading %s/ring-ref and event-channel",
+       err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%u",
+                          &evtchn);
+       if (err != 1) {
+               err = -EINVAL;
+
+               xenbus_dev_fatal(dev, err, "reading %s/event-channel",
                                 dev->otherend);
                return err;
        }
 
+       printk(KERN_INFO "blkback: event-channel %u\n", evtchn);
+
+       err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+                          &ring_order);
+       if (err != 1) {
+               DPRINTK("%s: using single page handshake", dev->otherend);
+
+               ring_order = 0;
+
+               err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
+                                  "%lu", &ring_ref[0]);
+               if (err != 1) {
+                       err = -EINVAL;
+
+                       xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
+                                        dev->otherend);
+                       return err;
+               }
+
+               printk(KERN_INFO "blkback: ring-ref %lu\n", ring_ref[0]);
+       } else {
+               unsigned int i;
+
+               if (ring_order > blkif_max_ring_page_order) {
+                       err = -EINVAL;
+
+                       xenbus_dev_fatal(dev, err,
+                                        "%s/ring-page-order too big",
+                                        dev->otherend);
+                       return err;
+               }
+
+               DPRINTK("%s: using %u page(s)", dev->otherend,
+                       (1 << ring_order));
+
+               for (i = 0; i < (1u << ring_order); i++) {
+                       char ring_ref_name[10];
+
+                       snprintf(ring_ref_name, sizeof(ring_ref_name),
+                                "ring-ref%1u", i);
+                       err = xenbus_scanf(XBT_NIL, dev->otherend,
+                                          ring_ref_name, "%lu",
+                                          &ring_ref[i]);
+                       if (err != 1) {
+                               err = -EINVAL;
+
+                               xenbus_dev_fatal(dev, err,
+                                                "reading %s/%s",
+                                                dev->otherend,
+                                                ring_ref_name);
+                               return err;
+                       }
+
+                       printk(KERN_INFO "blkback: ring-ref%u %lu\n", i,
+                              ring_ref[i]);
+               }
+       }
+
        be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
        err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
                            "%63s", protocol, NULL);
@@ -497,18 +567,19 @@ static int connect_ring(struct backend_info *be)
        else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
                be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
        else {
+               err = -EINVAL;
+
                xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
-               return -1;
+               return err;
        }
-       printk(KERN_INFO
-              "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
-              ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+
+       printk(KERN_INFO "blkback: protocol %d (%s)\n",
+              be->blkif->blk_protocol, protocol);
 
        /* Map the shared frame, irq etc. */
-       err = blkif_map(be->blkif, ring_ref, evtchn);
+       err = blkif_map(be->blkif, ring_ref, (1u << ring_order), evtchn);
        if (err) {
-               xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-                                ring_ref, evtchn);
+               xenbus_dev_fatal(dev, err, "mapping ring-refs and evtchn");
                return err;
        }
 
@@ -537,5 +608,8 @@ static struct xenbus_driver blkback = {
 
 int blkif_xenbus_init(void)
 {
+       if (blkif_max_ring_page_order > BLKIF_MAX_RING_PAGE_ORDER)
+               blkif_max_ring_page_order = BLKIF_MAX_RING_PAGE_ORDER;
+
        return xenbus_register_backend(&blkback);
 }
-- 
1.5.6.5
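
For context, the payoff of a larger ring is more requests in flight: BACK_RING_INIT() above sizes the ring with the full mapped area, so the slot count grows with the negotiated order. The helper below is an illustrative sketch only, not part of the patch; blkif_ring_entries() is an assumed name, and the figures quoted assume 4 KiB pages and the standard __RING_SIZE() macro from xen/interface/io/ring.h.

#include <asm/page.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>

/*
 * Illustrative sketch: request slots available for a given ring order.
 * This mirrors what BACK_RING_INIT() computes internally: the usable
 * bytes divided by the size of one request/response slot, rounded down
 * to a power of two.
 */
static inline unsigned int blkif_ring_entries(unsigned int ring_order)
{
        return __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE << ring_order);
}

With the native request layout this works out to 32 slots for a single 4 KiB page, so an order of BLKIF_MAX_RING_PAGE_ORDER (2) roughly quadruples the number of requests that can be outstanding at once.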

