WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-changelog

[Xen-changelog] [linux-2.6.18-xen] xen: cleanups to blkback and blktap

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [linux-2.6.18-xen] xen: cleanups to blkback and blktap
From: Xen patchbot-linux-2.6.18-xen <patchbot@xxxxxxx>
Date: Tue, 05 Apr 2011 12:50:06 +0100
Delivery-date: Tue, 05 Apr 2011 04:50:32 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxxxx>
# Date 1302003535 -3600
# Node ID b78a36ec9895c141b5d75ccbbbd23253640a138f
# Parent  13937900bbd01cd62592d3535ea4943c22ba2b92
xen: cleanups to blkback and blktap

Remove unused/unnecessary fields of their pending_req_t structures,
and reduce the width of those structures' nr_pages field.

Move loop-invariant grant table flags calculation out of loops (also
in scsiback).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
---


diff -r 13937900bbd0 -r b78a36ec9895 drivers/xen/blkback/blkback.c
--- a/drivers/xen/blkback/blkback.c     Tue Apr 05 12:38:20 2011 +0100
+++ b/drivers/xen/blkback/blkback.c     Tue Apr 05 12:38:55 2011 +0100
@@ -74,10 +74,9 @@
 typedef struct {
        blkif_t       *blkif;
        u64            id;
-       int            nr_pages;
        atomic_t       pendcnt;
+       unsigned short nr_pages;
        unsigned short operation;
-       int            status;
        struct list_head free_list;
 } pending_req_t;
 
@@ -255,22 +254,24 @@
 
 static void __end_block_io_op(pending_req_t *pending_req, int error)
 {
+       int status = BLKIF_RSP_OKAY;
+
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
            (error == -EOPNOTSUPP)) {
                DPRINTK("blkback: write barrier op failed, not supported\n");
                blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
-               pending_req->status = BLKIF_RSP_EOPNOTSUPP;
+               status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                DPRINTK("Buffer not up-to-date at end of operation, "
                        "error=%d\n", error);
-               pending_req->status = BLKIF_RSP_ERROR;
+               status = BLKIF_RSP_ERROR;
        }
 
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
-                             pending_req->operation, pending_req->status);
+                             pending_req->operation, status);
                blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
@@ -390,7 +391,6 @@
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
 {
-       extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct { 
@@ -398,6 +398,7 @@
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
+       uint32_t flags;
        int ret, i;
        int operation;
 
@@ -431,12 +432,13 @@
        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = req->operation;
-       pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;
 
+       flags = GNTMAP_host_map;
+       if (operation != READ)
+               flags |= GNTMAP_readonly;
+
        for (i = 0; i < nseg; i++) {
-               uint32_t flags;
-
                seg[i].nsec = req->seg[i].last_sect -
                        req->seg[i].first_sect + 1;
 
@@ -445,9 +447,6 @@
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;
 
-               flags = GNTMAP_host_map;
-               if (operation != READ)
-                       flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->seg[i].gref, blkif->domid);
        }
diff -r 13937900bbd0 -r b78a36ec9895 drivers/xen/blktap/blktap.c
--- a/drivers/xen/blktap/blktap.c       Tue Apr 05 12:38:20 2011 +0100
+++ b/drivers/xen/blktap/blktap.c       Tue Apr 05 12:38:55 2011 +0100
@@ -133,20 +133,14 @@
 
 /*
  * Each outstanding request that we've passed to the lower device layers has a 
- * 'pending_req' allocated to it. Each buffer_head that completes decrements 
- * the pendcnt towards zero. When it hits zero, the specified domain has a 
- * response queued for it, with the saved 'id' passed back.
+ * 'pending_req' allocated to it.
  */
 typedef struct {
        blkif_t       *blkif;
        u64            id;
        unsigned short mem_idx;
-       int            nr_pages;
-       atomic_t       pendcnt;
-       unsigned short operation;
-       int            status;
+       unsigned short nr_pages;
        struct list_head free_list;
-       int            inuse;
 } pending_req_t;
 
 static pending_req_t *pending_reqs[MAX_PENDING_REQS];
@@ -983,10 +977,8 @@
                list_del(&req->free_list);
        }
 
-       if (req) {
-               req->inuse = 1;
+       if (req)
                alloc_pending_reqs++;
-       }
        spin_unlock_irqrestore(&pending_free_lock, flags);
 
        return req;
@@ -1000,7 +992,6 @@
        spin_lock_irqsave(&pending_free_lock, flags);
 
        alloc_pending_reqs--;
-       req->inuse = 0;
        if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
                mmap_inuse--;
                if (mmap_inuse == 0) mmap_req_del(mmap_alloc-1);
@@ -1402,16 +1393,15 @@
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
 {
-       extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
-       int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
        unsigned int nseg;
-       int ret, i, nr_sects = 0;
+       int ret, i, op, nr_sects = 0;
        tap_blkif_t *info;
        blkif_request_t *target;
        unsigned int mmap_idx = pending_req->mem_idx;
        unsigned int pending_idx = RTN_PEND_IDX(pending_req, mmap_idx);
        unsigned int usr_idx;
+       uint32_t flags;
        struct mm_struct *mm;
        struct vm_area_struct *vma = NULL;
 
@@ -1454,9 +1444,11 @@
 
        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
-       pending_req->operation = operation;
-       pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;
+
+       flags = GNTMAP_host_map;
+       if (req->operation == BLKIF_OP_WRITE)
+               flags |= GNTMAP_readonly;
        op = 0;
        mm = info->mm;
        if (!xen_feature(XENFEAT_auto_translated_physmap))
@@ -1465,14 +1457,10 @@
                unsigned long uvaddr;
                unsigned long kvaddr;
                uint64_t ptep;
-               uint32_t flags;
 
                uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
                kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
 
-               flags = GNTMAP_host_map;
-               if (operation == WRITE)
-                       flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[op], kvaddr, flags,
                                  req->seg[i].gref, blkif->domid);
                op++;
@@ -1486,11 +1474,9 @@
                                goto fail_flush;
                        }
 
-                       flags = GNTMAP_host_map | GNTMAP_application_map
-                               | GNTMAP_contains_pte;
-                       if (operation == WRITE)
-                               flags |= GNTMAP_readonly;
-                       gnttab_set_map_op(&map[op], ptep, flags,
+                       gnttab_set_map_op(&map[op], ptep,
+                                         flags | GNTMAP_application_map
+                                               | GNTMAP_contains_pte,
                                          req->seg[i].gref, blkif->domid);
                        op++;
                }
@@ -1620,10 +1606,14 @@
        wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
        info->ufe_ring.req_prod_pvt++;
 
-       if (operation == READ)
+       switch (req->operation) {
+       case BLKIF_OP_READ:
                blkif->st_rd_sect += nr_sects;
-       else if (operation == WRITE)
+               break;
+       case BLKIF_OP_WRITE:
                blkif->st_wr_sect += nr_sects;
+               break;
+       }
 
        return;
 
diff -r 13937900bbd0 -r b78a36ec9895 drivers/xen/scsiback/scsiback.c
--- a/drivers/xen/scsiback/scsiback.c   Tue Apr 05 12:38:20 2011 +0100
+++ b/drivers/xen/scsiback/scsiback.c   Tue Apr 05 12:38:55 2011 +0100
@@ -270,14 +270,14 @@
                        return -ENOMEM;
                }
 
-               for (i = 0; i < nr_segments; i++) {
-                       flags = GNTMAP_host_map;
-                       if (write)
-                               flags |= GNTMAP_readonly;
+               flags = GNTMAP_host_map;
+               if (write)
+                       flags |= GNTMAP_readonly;
+
+               for (i = 0; i < nr_segments; i++)
                        gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                                ring_req->seg[i].gref,
                                                info->domid);
-               }
 
                err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
                BUG_ON(err);

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

<Prev in Thread] Current Thread [Next in Thread>
  • [Xen-changelog] [linux-2.6.18-xen] xen: cleanups to blkback and blktap, Xen patchbot-linux-2.6.18-xen <=