Prepare for extending the block device ring to allow request-specific
fields, by moving the fields specific to read, write and barrier
requests into a union member.

Signed-off-by: Owen Smith <owen.smith@xxxxxxxxxx>
---
 drivers/block/xen-blkfront.c     |    8 ++++----
 drivers/xen/blkback/blkback.c    |   16 ++++++++--------
 include/xen/blkif.h              |    8 ++++----
 include/xen/interface/io/blkif.h |   16 +++++++++++-----
 4 files changed, 27 insertions(+), 21 deletions(-)
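
[Reviewer note, not part of the patch] A minimal sketch of the resulting
request layout and how it is accessed, assembled from the hunks below;
the blkif_sector_t/blkif_vdev_t/grant_ref_t typedefs and
BLKIF_MAX_SEGMENTS_PER_REQUEST are assumed to come from the existing
blkif headers:

	/* Sketch of the layout after this patch
	 * (include/xen/interface/io/blkif.h): the read/write/barrier
	 * specific fields sit behind a union, so later operations can
	 * add their own members without touching the common header.
	 */
	struct blkif_request_rw {
		blkif_sector_t sector_number;	/* start sector idx on disk */
		struct blkif_request_segment {
			grant_ref_t gref;	/* reference to I/O buffer frame */
			uint8_t first_sect, last_sect;
		} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	};

	struct blkif_request {
		uint8_t operation;	/* BLKIF_OP_??? */
		uint8_t nr_segments;	/* number of segments */
		blkif_vdev_t handle;	/* only for read/write requests */
		uint64_t id;		/* private guest value, echoed in resp */
		union {
			struct blkif_request_rw rw;
		} u;
	};

	/* Frontend and backend accesses then go through the union, e.g.:
	 *	ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
	 *	preq.sector_number = req->u.rw.sector_number;
	 */
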
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 44059e6..3316dc7 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -286,7 +286,7 @@ static int blkif_queue_request(struct request *req)
info->shadow[id].request = (unsigned long)req;
ring_req->id = id;
- ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
@@ -312,7 +312,7 @@ static int blkif_queue_request(struct request *req)
rq_data_dir(req) );
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
- ring_req->seg[i] =
+ ring_req->u.rw.seg[i] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
@@ -692,7 +692,7 @@ static void blkif_completion(struct blk_shadow *s)
{
int i;
for (i = 0; i < s->req.nr_segments; i++)
- gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+ gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}
static void
@@ -1010,7 +1010,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Rewrite any grant references invalidated by susp/resume. */
for (j = 0; j < req->nr_segments; j++)
gnttab_grant_foreign_access_ref(
- req->seg[j].gref,
+ req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->id].frame[j]),
rq_data_dir(
diff --git a/drivers/xen/blkback/blkback.c b/drivers/xen/blkback/blkback.c
index 0bef445..b45b21f 100644
--- a/drivers/xen/blkback/blkback.c
+++ b/drivers/xen/blkback/blkback.c
@@ -424,7 +424,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
}
preq.dev = req->handle;
- preq.sector_number = req->sector_number;
+ preq.sector_number = req->u.rw.sector_number;
preq.nr_sects = 0;
pending_req->blkif = blkif;
@@ -436,11 +436,11 @@ static void dispatch_rw_block_io(blkif_t *blkif,
for (i = 0; i < nseg; i++) {
uint32_t flags;
- seg[i].nsec = req->seg[i].last_sect -
- req->seg[i].first_sect + 1;
+ seg[i].nsec = req->u.rw.seg[i].last_sect -
+ req->u.rw.seg[i].first_sect + 1;
- if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
- (req->seg[i].last_sect < req->seg[i].first_sect))
+ if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
+ (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
goto fail_response;
preq.nr_sects += seg[i].nsec;
@@ -448,7 +448,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
if (operation != READ)
flags |= GNTMAP_readonly;
gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
- req->seg[i].gref, blkif->domid);
+ req->u.rw.seg[i].gref, blkif->domid);
}
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
@@ -466,11 +466,11 @@ static void dispatch_rw_block_io(blkif_t *blkif,
page_to_pfn(pending_page(pending_req, i)),
FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
seg[i].buf = map[i].dev_bus_addr |
- (req->seg[i].first_sect << 9);
+ (req->u.rw.seg[i].first_sect << 9);
blkback_pagemap_set(vaddr_pagenr(pending_req, i),
pending_page(pending_req, i),
blkif->domid, req->handle,
- req->seg[i].gref);
+ req->u.rw.seg[i].gref);
pending_handle(pending_req, i) = map[i].handle;
}
diff --git a/include/xen/blkif.h b/include/xen/blkif.h
index 7172081..71018e9 100644
--- a/include/xen/blkif.h
+++ b/include/xen/blkif.h
@@ -97,12 +97,12 @@ static void inline blkif_get_x86_32_req(struct blkif_request *dst, struct blkif_
dst->nr_segments = src->nr_segments;
dst->handle = src->handle;
dst->id = src->id;
- dst->sector_number = src->sector_number;
+ dst->u.rw.sector_number = src->sector_number;
barrier();
if (n > dst->nr_segments)
n = dst->nr_segments;
for (i = 0; i < n; i++)
- dst->seg[i] = src->seg[i];
+ dst->u.rw.seg[i] = src->seg[i];
}
static void inline blkif_get_x86_64_req(struct blkif_request *dst, struct blkif_x86_64_request *src)
@@ -112,12 +112,12 @@ static void inline blkif_get_x86_64_req(struct blkif_request *dst, struct blkif_
dst->nr_segments = src->nr_segments;
dst->handle = src->handle;
dst->id = src->id;
- dst->sector_number = src->sector_number;
+ dst->u.rw.sector_number = src->sector_number;
barrier();
if (n > dst->nr_segments)
n = dst->nr_segments;
for (i = 0; i < n; i++)
- dst->seg[i] = src->seg[i];
+ dst->u.rw.seg[i] = src->seg[i];
}
#endif /* __XEN_BLKIF_H__ */
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 68dd2b4..61e523a 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -51,11 +51,7 @@ typedef uint64_t blkif_sector_t;
*/
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
-struct blkif_request {
- uint8_t operation; /* BLKIF_OP_??? */
- uint8_t nr_segments; /* number of segments */
- blkif_vdev_t handle; /* only for read/write requests */
- uint64_t id; /* private guest value, echoed in resp */
+struct blkif_request_rw {
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment {
grant_ref_t gref;        /* reference to I/O buffer frame        */
@@ -65,6 +61,16 @@ struct blkif_request {
} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
+struct blkif_request {
+ uint8_t operation; /* BLKIF_OP_??? */
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
+ union {
+ struct blkif_request_rw rw;
+ } u;
+};
+
struct blkif_response {
uint64_t id; /* copied from request */
uint8_t operation; /* copied from request */
--
1.5.6.5