From: Jens Axboe <jens.axboe@xxxxxxxxxx>
Subject: xen/blkfront: use blk_rq_map_sg to generate ring entries
On occasion, the request will apparently have more segments than will
fit into the ring. Jens says:
> The second problem is that the block layer then appears to create one
> too many segments, but from the dump it has rq->nr_phys_segments ==
> BLKIF_MAX_SEGMENTS_PER_REQUEST. I suspect the latter is due to
> xen-blkfront not handling the merging on its own. It should check that
> the new page doesn't form part of the previous page. The
> rq_for_each_segment() iterates all single bits in the request, not dma
> segments. The "easiest" way to do this is to call blk_rq_map_sg() and
> then iterate the mapped sg list. That will give you what you are
> looking for.
> Here's a test patch, compiles but otherwise untested. I spent more
> time figuring out how to enable XEN than to code it up, so YMMV!
> Probably the sg list wants to be put inside the ring and only
> initialized on allocation, then you can get rid of the sg on stack and
> sg_init_table() loop call in the function. I'll leave that, and the
> testing, to you.
[Moved sg array into info structure, and initialized it once. -J]
Signed-off-by: Jens Axboe <jens.axboe@xxxxxxxxxx>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@xxxxxxxxxx>
After testing on 2.6.27.19, made it apply to the 2.6.18 tree.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
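For reference, the queueing-path change below condenses to roughly the
following sketch (not part of the patch; it assumes the 2.6.18-based
tree's struct scatterlist with a direct ->page member, the per-device
info->sg[] array the patch adds to struct blkfront_info, and that
tree's Xen blkif definitions; fill_ring_segments() is only an
illustrative name):

static void fill_ring_segments(struct blkfront_info *info, struct request *req,
			       blkif_request_t *ring_req)
{
	struct scatterlist *sg;
	unsigned long buffer_mfn;
	unsigned int fsect, lsect;
	int i;

	/* Let the block layer coalesce adjacent bvecs into dma segments. */
	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	for (i = 0; i < ring_req->nr_segments; ++i) {
		sg = info->sg + i;
		buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT;
		fsect = sg->offset >> 9;		/* first 512-byte sector in the page */
		lsect = fsect + (sg->length >> 9) - 1;	/* last sector, inclusive */
		/* claim a grant reference and record <gref, fsect, lsect>
		 * in ring_req->seg[i], as in the patch below */
	}
}
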
--- sle11-2009-03-04.orig/drivers/xen/blkfront/blkfront.c	2009-03-04 11:38:21.000000000 +0100
+++ sle11-2009-03-04/drivers/xen/blkfront/blkfront.c	2009-03-04 11:38:33.000000000 +0100
@@ -40,6 +40,7 @@
#include <linux/cdrom.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <xen/evtchn.h>
#include <xen/xenbus.h>
@@ -232,6 +233,8 @@ static int setup_blkring(struct xenbus_d
SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+ sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
if (err < 0) {
free_page((unsigned long)sring);
@@ -593,13 +596,11 @@ static int blkif_queue_request(struct re
struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn;
blkif_request_t *ring_req;
- struct bio *bio;
- struct bio_vec *bvec;
- int idx;
unsigned long id;
unsigned int fsect, lsect;
- int ref;
+ int i, ref;
grant_ref_t gref_head;
+ struct scatterlist *sg;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
@@ -629,14 +631,13 @@ static int blkif_queue_request(struct re
if (blk_barrier_rq(req))
ring_req->operation = BLKIF_OP_WRITE_BARRIER;
- ring_req->nr_segments = 0;
- rq_for_each_bio (bio, req) {
- bio_for_each_segment (bvec, bio, idx) {
- BUG_ON(ring_req->nr_segments
- == BLKIF_MAX_SEGMENTS_PER_REQUEST);
- buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
- fsect = bvec->bv_offset >> 9;
- lsect = fsect + (bvec->bv_len >> 9) - 1;
+ ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
+ BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ for (i = 0; i < ring_req->nr_segments; ++i) {
+ sg = info->sg + i;
+ buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT;
+ fsect = sg->offset >> 9;
+ lsect = fsect + (sg->length >> 9) - 1;
/* install a grant reference. */
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
@@ -646,17 +648,12 @@ static int blkif_queue_request(struct re
buffer_mfn,
rq_data_dir(req) ? GTF_readonly : 0 );
- info->shadow[id].frame[ring_req->nr_segments] =
- mfn_to_pfn(buffer_mfn);
-
- ring_req->seg[ring_req->nr_segments] =
+ info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
+ ring_req->seg[i] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
-
- ring_req->nr_segments++;
- }
}
info->ring.req_prod_pvt++;
--- sle11-2009-03-04.orig/drivers/xen/blkfront/block.h	2009-03-04 11:38:21.000000000 +0100
+++ sle11-2009-03-04/drivers/xen/blkfront/block.h	2009-03-04 11:38:33.000000000 +0100
@@ -102,6 +102,7 @@ struct blkfront_info
int connected;
int ring_ref;
blkif_front_ring_t ring;
+ struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int irq;
struct xlbd_major_info *mi;
request_queue_t *rq;
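
As a sanity check on the fsect/lsect arithmetic above (illustrative
numbers, not taken from the patch): a coalesced segment covering a
whole 4096-byte page has sg->offset == 0 and sg->length == 4096, so
fsect = 0 >> 9 = 0 and lsect = 0 + (4096 >> 9) - 1 = 7, i.e. the
segment spans all eight 512-byte sectors of that page; a 1024-byte
segment starting 512 bytes into the page gives fsect = 1 and lsect = 2.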