[Xen-devel] [PATCH 2/2] Add multi-page ring support to xen-blkfront

If the backend advertises a "max-ring-page-order" key in xenstore,
allocate a shared ring of up to 2^max-ring-page-order pages (capped at
BLKIF_MAX_RING_PAGE_ORDER, currently order 2, i.e. 4 pages), grant each
page individually, and publish the grant references as "ring-ref%u"
alongside a "ring-page-order" key. If the backend does not advertise
the capability, or the allow_multi_page_ring module parameter is set
to 0, fall back to the existing single-page "ring-ref" handshake. A
larger ring allows more block requests to be outstanding at once.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
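As an aside for reviewers (below the "---", so not part of the commit):
the benefit of the extra ring pages is more in-flight requests. The
sketch that follows is a minimal, self-contained illustration of the
ring-size arithmetic, mirroring the power-of-two round-down that
__RING_SIZE() performs. The 64-byte shared-ring header and 112-byte
request/response union are assumed values taken from the 64-bit x86
blkif ABI, not from this patch.

  #include <stdio.h>

  #define SRING_HEADER_BYTES 64   /* assumed blkif_sring header size */
  #define RING_SLOT_BYTES    112  /* assumed request/response union size */
  #define PAGE_BYTES         4096

  /* Round down to a power of two, as Xen's __RING_SIZE() does. */
  static unsigned int round_down_pow2(unsigned int x)
  {
      unsigned int p = 1;

      while (p * 2 <= x)
          p *= 2;
      return p;
  }

  int main(void)
  {
      int order;

      for (order = 0; order <= 2; order++) {
          unsigned int bytes = PAGE_BYTES << order;
          unsigned int slots = round_down_pow2(
              (bytes - SRING_HEADER_BYTES) / RING_SLOT_BYTES);

          printf("order %d: %u page(s), %u ring entries\n",
                 order, 1u << order, slots);
      }
      return 0;
  }

Under those assumptions the entry count goes 32 -> 64 -> 128 as the
ring order goes 0 -> 2, which is why the shadow array below is sized
for MAX_BLK_RING_SIZE rather than the single-page BLK_RING_SIZE.
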
 drivers/block/xen-blkfront.c |  137 ++++++++++++++++++++++++++++++++----------
 1 file changed, 105 insertions(+), 32 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 837b992..1aaa5a4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -67,7 +67,25 @@ struct blk_shadow {
 
 static struct block_device_operations xlvbd_block_fops;
 
-#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+#define BLK_RING_SIZE(_order) \
+       __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE<<(_order))
+
+#define BLKIF_MAX_RING_PAGE_ORDER 2
+#define MAX_BLK_RING_SIZE BLK_RING_SIZE(BLKIF_MAX_RING_PAGE_ORDER)
+
+#define BLKIF_MAX_RING_PAGES (1<<BLKIF_MAX_RING_PAGE_ORDER)
+
+/*
+ * Allow a multi-page shared ring to be used if the capability is
+ * advertised by the back end.
+ */
+static int allow_multi_page_ring = 1;
+module_param_named(allow_multi_page_ring,
+                  allow_multi_page_ring,
+                  int,
+                  0);
+MODULE_PARM_DESC(allow_multi_page_ring,
+                "Allow a multi-page shared ring to be used");
 
 /*
  * We have one of these per vbd, whether ide, scsi or 'other'.  They
@@ -81,14 +99,15 @@ struct blkfront_info
        int vdevice;
        blkif_vdev_t handle;
        enum blkif_state connected;
-       int ring_ref;
+       int ring_ref[BLKIF_MAX_RING_PAGES];
+       int ring_order;
        struct blkif_front_ring ring;
        struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int evtchn, irq;
        struct request_queue *rq;
        struct work_struct work;
        struct gnttab_free_callback callback;
-       struct blk_shadow shadow[BLK_RING_SIZE];
+       struct blk_shadow shadow[MAX_BLK_RING_SIZE];
        unsigned long shadow_free;
        int feature_barrier;
        int is_ready;
@@ -102,8 +121,6 @@ struct blkfront_info
 
 static DEFINE_SPINLOCK(blkif_io_lock);
 
-#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
-       (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
 #define GRANT_INVALID_REF      0
 
 #define PARTS_PER_DISK         16
@@ -122,7 +139,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
 static int get_id_from_freelist(struct blkfront_info *info)
 {
        unsigned long free = info->shadow_free;
-       BUG_ON(free >= BLK_RING_SIZE);
+       BUG_ON(free >= MAX_BLK_RING_SIZE);
        info->shadow_free = info->shadow[free].req.id;
        info->shadow[free].req.id = 0x0fffffee; /* debug */
        return free;
@@ -495,6 +512,9 @@ static void blkif_restart_queue(struct work_struct *work)
 
 static void blkif_free(struct blkfront_info *info, int suspend)
 {
+       int i;
+       unsigned long sring = (unsigned long)info->ring.sring;
+
        /* Prevent new requests being issued until we fix things up. */
        spin_lock_irq(&blkif_io_lock);
        info->connected = suspend ?
@@ -510,12 +530,14 @@ static void blkif_free(struct blkfront_info *info, int suspend)
        flush_scheduled_work();
 
        /* Free resources associated with old device channel. */
-       if (info->ring_ref != GRANT_INVALID_REF) {
-               gnttab_end_foreign_access(info->ring_ref, 0,
-                                         (unsigned long)info->ring.sring);
-               info->ring_ref = GRANT_INVALID_REF;
-               info->ring.sring = NULL;
+       for (i = 0; i < (1<<info->ring_order); i++) {
+               if (info->ring_ref[i] != GRANT_INVALID_REF) {
+                       gnttab_end_foreign_access(info->ring_ref[i], 0,
+                                                 sring + (i<<PAGE_SHIFT));
+                       info->ring_ref[i] = GRANT_INVALID_REF;
+               }
        }
+       info->ring.sring = NULL;
        if (info->irq)
                unbind_from_irqhandler(info->irq, info);
        info->evtchn = info->irq = 0;
@@ -605,28 +627,35 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 static int setup_blkring(struct xenbus_device *dev,
                         struct blkfront_info *info)
 {
-       struct blkif_sring *sring;
+       int i;
+       unsigned long sring;
        int err;
 
-       info->ring_ref = GRANT_INVALID_REF;
+       for (i = 0; i < (1<<info->ring_order); i++)
+               info->ring_ref[i] = GRANT_INVALID_REF;
 
-       sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
+       sring = __get_free_pages(GFP_NOIO | __GFP_HIGH, info->ring_order);
        if (!sring) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }
-       SHARED_RING_INIT(sring);
-       FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+       SHARED_RING_INIT((struct blkif_sring *)sring);
+       FRONT_RING_INIT(&info->ring, (struct blkif_sring *)sring,
+                       PAGE_SIZE<<info->ring_order);
 
        sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
-       err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
-       if (err < 0) {
-               free_page((unsigned long)sring);
-               info->ring.sring = NULL;
-               goto fail;
+       for (i = 0; i < (1<<info->ring_order); i++) {
+               unsigned long addr = sring + (i<<PAGE_SHIFT);
+
+               err = xenbus_grant_ring(dev, virt_to_mfn(addr));
+               if (err < 0) {
+                       free_pages(sring, info->ring_order);
+                       info->ring.sring = NULL;
+                       goto fail;
+               }
+               info->ring_ref[i] = err;
        }
-       info->ring_ref = err;
 
        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err)
@@ -648,6 +677,9 @@ fail:
        return err;
 }
 
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
 
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_blkback(struct xenbus_device *dev,
@@ -655,8 +687,24 @@ static int talk_to_blkback(struct xenbus_device *dev,
 {
        const char *message = NULL;
        struct xenbus_transaction xbt;
+       int ring_order;
+       int single_page;
        int err;
 
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                           "max-ring-page-order", "%u", &ring_order,
+                           NULL);
+       if (err || !allow_multi_page_ring) {
+               single_page = 1;
+               info->ring_order = 0;
+               dev_dbg(&dev->dev, "using single-page handshake\n");
+       } else {
+               single_page = 0;
+               info->ring_order = MIN(ring_order, BLKIF_MAX_RING_PAGE_ORDER);
+               dev_dbg(&dev->dev, "using multi-page handshake (%d pages)\n",
+                       (1<<info->ring_order));
+       }
+
        /* Create shared ring, alloc event channel. */
        err = setup_blkring(dev, info);
        if (err)
@@ -669,18 +717,43 @@ again:
                goto destroy_blkring;
        }
 
-       err = xenbus_printf(xbt, dev->nodename,
-                           "ring-ref", "%u", info->ring_ref);
-       if (err) {
-               message = "writing ring-ref";
-               goto abort_transaction;
+       if (single_page) {
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "ring-ref", "%u", info->ring_ref[0]);
+               if (err) {
+                       message = "writing ring-ref";
+                       goto abort_transaction;
+               }
+       } else {
+               int i;
+
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "ring-page-order", "%u", info->ring_order);
+               if (err) {
+                       message = "writing ring-page-order";
+                       goto abort_transaction;
+               }
+
+               for (i = 0; i < (1<<info->ring_order); i++) {
+                       char node[10];
+
+                       snprintf(node, sizeof(node), "ring-ref%u", i);
+                       err = xenbus_printf(xbt, dev->nodename, node, "%u",
+                                           info->ring_ref[i]);
+                       if (err) {
+                               message = "writing ring-ref";
+                               goto abort_transaction;
+                       }
+               }
        }
+
        err = xenbus_printf(xbt, dev->nodename,
                            "event-channel", "%u", info->evtchn);
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }
+
        err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
                            XEN_IO_PROTO_ABI_NATIVE);
        if (err) {
@@ -747,9 +820,9 @@ static int blkfront_probe(struct xenbus_device *dev,
        info->connected = BLKIF_STATE_DISCONNECTED;
        INIT_WORK(&info->work, blkif_restart_queue);
 
-       for (i = 0; i < BLK_RING_SIZE; i++)
+       for (i = 0; i < MAX_BLK_RING_SIZE; i++)
                info->shadow[i].req.id = i+1;
-       info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+       info->shadow[MAX_BLK_RING_SIZE-1].req.id = 0x0fffffff;
 
        /* Front end dir is a number, which is used as the id. */
        info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
@@ -782,13 +855,13 @@ static int blkif_recover(struct blkfront_info *info)
 
        /* Stage 2: Set up free list. */
        memset(&info->shadow, 0, sizeof(info->shadow));
-       for (i = 0; i < BLK_RING_SIZE; i++)
+       for (i = 0; i < MAX_BLK_RING_SIZE; i++)
                info->shadow[i].req.id = i+1;
        info->shadow_free = info->ring.req_prod_pvt;
-       info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+       info->shadow[MAX_BLK_RING_SIZE-1].req.id = 0x0fffffff;
 
        /* Stage 3: Find pending requests and requeue them. */
-       for (i = 0; i < BLK_RING_SIZE; i++) {
+       for (i = 0; i < MAX_BLK_RING_SIZE; i++) {
                /* Not in use? */
                if (copy[i].request == 0)
                        continue;
-- 
1.5.6.5
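
For completeness, the xenstore handshake implemented above looks like
this when the backend advertises max-ring-page-order = 2 and the
frontend keeps the default allow_multi_page_ring=1. The node names come
from the patch; the paths are abbreviated and the numeric values are
illustrative only:

  backend/.../max-ring-page-order = "2"

  device/vbd/<id>/ring-page-order = "2"
  device/vbd/<id>/ring-ref0       = "8"
  device/vbd/<id>/ring-ref1       = "9"
  device/vbd/<id>/ring-ref2       = "10"
  device/vbd/<id>/ring-ref3       = "11"
  device/vbd/<id>/event-channel   = "15"
  device/vbd/<id>/protocol        = "x86_64-abi"

If the backend does not advertise max-ring-page-order, the frontend
falls back to the old single-page handshake (a lone "ring-ref" node and
no "ring-page-order"). The multi-page path can also be disabled at
module load time:

  modprobe xen-blkfront allow_multi_page_ring=0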


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel