Just like in blkfront, failing to map the request onto a scatterlist
(so that adjacent bio segments get merged) can cause the
BLKIF_MAX_SEGMENTS_PER_REQUEST check to trigger.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
--- head-2009-05-29.orig/drivers/xen/blktap2/blktap.h 2009-05-29
10:25:53.000000000 +0200
+++ head-2009-05-29/drivers/xen/blktap2/blktap.h 2009-05-29
16:25:25.000000000 +0200
@@ -4,6 +4,7 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/cdev.h>
+#include <linux/scatterlist.h>
#include <xen/blkif.h>
#include <xen/gnttab.h>
@@ -174,6 +175,7 @@ struct blktap {
int pending_cnt;
struct blktap_request *pending_requests[MAX_PENDING_REQS];
+ struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
wait_queue_head_t wq;
struct list_head deferred_queue;
--- head-2009-05-29.orig/drivers/xen/blktap2/device.c 2009-05-29
10:25:53.000000000 +0200
+++ head-2009-05-29/drivers/xen/blktap2/device.c 2009-05-29
16:30:25.000000000 +0200
@@ -569,11 +569,10 @@ blktap_device_process_request(struct blk
struct blktap_request *request,
struct request *req)
{
- struct bio *bio;
struct page *page;
- struct bio_vec *bvec;
- int idx, usr_idx, err;
+ int i, usr_idx, err;
struct blktap_ring *ring;
+ struct scatterlist *sg;
struct blktap_grant_table table;
unsigned int fsect, lsect, nr_sects;
unsigned long offset, uvaddr, kvaddr;
@@ -600,43 +599,39 @@ blktap_device_process_request(struct blk
nr_sects = 0;
request->nr_pages = 0;
- blkif_req.nr_segments = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bvec, bio, idx) {
- BUG_ON(blkif_req.nr_segments ==
- BLKIF_MAX_SEGMENTS_PER_REQUEST);
-
- fsect = bvec->bv_offset >> 9;
- lsect = fsect + (bvec->bv_len >> 9) - 1;
- nr_sects += bvec->bv_len >> 9;
+ blkif_req.nr_segments = blk_rq_map_sg(req->q, req, tap->sg);
+ BUG_ON(blkif_req.nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ for (i = 0; i < blkif_req.nr_segments; ++i) {
+ sg = tap->sg + i;
+ fsect = sg->offset >> 9;
+ lsect = fsect + (sg->length >> 9) - 1;
+ nr_sects += sg->length >> 9;
- blkif_req.seg[blkif_req.nr_segments] =
+ blkif_req.seg[i] =
(struct blkif_request_segment) {
.gref = 0,
.first_sect = fsect,
.last_sect = lsect };
- if (PageBlkback(bvec->bv_page)) {
+ if (PageBlkback(sg->page)) {
/* foreign page -- use xen */
if (blktap_prep_foreign(tap,
request,
&blkif_req,
- blkif_req.nr_segments,
- bvec->bv_page,
+ i,
+ sg->page,
&table))
goto out;
} else {
/* do it the old fashioned way */
blktap_map(tap,
request,
- blkif_req.nr_segments,
- bvec->bv_page);
+ i,
+ sg->page);
}
- uvaddr = MMAP_VADDR(ring->user_vstart,
- usr_idx, blkif_req.nr_segments);
- kvaddr = request_to_kaddr(request,
- blkif_req.nr_segments);
+ uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
+ kvaddr = request_to_kaddr(request, i);
offset = (uvaddr - ring->vma->vm_start) >> PAGE_SHIFT;
page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
ring->foreign_map.map[offset] = page;
@@ -646,12 +641,10 @@ blktap_device_process_request(struct blk
uvaddr, page, __pa(kvaddr) >> PAGE_SHIFT);
BTDBG("offset: 0x%08lx, pending_req: %p, seg: %d, "
"page: %p, kvaddr: 0x%08lx, uvaddr: 0x%08lx\n",
- offset, request, blkif_req.nr_segments,
+ offset, request, i,
page, kvaddr, uvaddr);
- blkif_req.nr_segments++;
request->nr_pages++;
- }
}
if (blktap_map_foreign(tap, request, &blkif_req, &table))
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel