ChangeSet 1.1350, 2005/04/21 10:14:12+01:00, kaf24@xxxxxxxxxxxxxxxxxxxx
Fix blkdev suspend/resume.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
arch/xen/kernel/gnttab.c | 20 +--
drivers/xen/blkfront/blkfront.c | 249 +++++++++++++++++++++-------------------
2 files changed, 146 insertions(+), 123 deletions(-)
diff -Nru a/linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c b/linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c 2005-04-21 06:02:28 -04:00
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c 2005-04-21 06:02:28 -04:00
@@ -330,34 +330,36 @@
setup.nr_frames = NR_GRANT_FRAMES;
setup.frame_list = frames;
- if ( HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0 )
- BUG();
- if ( setup.status != 0 )
- BUG();
+ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0);
+ BUG_ON(setup.status != 0);
for ( i = 0; i < NR_GRANT_FRAMES; i++ )
set_fixmap_ma(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
- shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
-
- for ( i = 0; i < NR_GRANT_ENTRIES; i++ )
- gnttab_free_list[i] = i + 1;
-
return 0;
}
int gnttab_suspend(void)
{
int i;
+
for ( i = 0; i < NR_GRANT_FRAMES; i++ )
clear_fixmap(FIX_GNTTAB_END - i);
+
return 0;
}
static int __init gnttab_init(void)
{
+ int i;
+
BUG_ON(gnttab_resume());
+ shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
+
+ for ( i = 0; i < NR_GRANT_ENTRIES; i++ )
+ gnttab_free_list[i] = i + 1;
+
/*
* /proc/xen/grant : used by libxc to access grant tables
*/
diff -Nru a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c b/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c 2005-04-21 06:02:28 -04:00
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c 2005-04-21 06:02:28 -04:00
@@ -94,37 +94,38 @@
static grant_ref_t gref_head, gref_terminal;
#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
+#define GRANTREF_INVALID (1<<15)
#endif
-unsigned long rec_ring_free;
-blkif_request_t rec_ring[BLK_RING_SIZE];
+static struct blk_shadow {
+ blkif_request_t req;
+ unsigned long request;
+ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+} blk_shadow[BLK_RING_SIZE];
+unsigned long blk_shadow_free;
-static int recovery = 0; /* "Recovery in progress" flag. Protected
- * by the blkif_io_lock */
+static int recovery = 0; /* Recovery in progress: protected by blkif_io_lock */
static void kick_pending_request_queues(void);
int __init xlblk_init(void);
-void blkif_completion( blkif_request_t *req );
+static void blkif_completion(struct blk_shadow *s);
-static inline int GET_ID_FROM_FREELIST( void )
+static inline int GET_ID_FROM_FREELIST(void)
{
- unsigned long free = rec_ring_free;
-
+ unsigned long free = blk_shadow_free;
BUG_ON(free > BLK_RING_SIZE);
-
- rec_ring_free = rec_ring[free].id;
-
- rec_ring[free].id = 0x0fffffee; /* debug */
-
+ blk_shadow_free = blk_shadow[free].req.id;
+ blk_shadow[free].req.id = 0x0fffffee; /* debug */
return free;
}
-static inline void ADD_ID_TO_FREELIST( unsigned long id )
+static inline void ADD_ID_TO_FREELIST(unsigned long id)
{
- rec_ring[id].id = rec_ring_free;
- rec_ring_free = id;
+ blk_shadow[id].req.id = blk_shadow_free;
+ blk_shadow[id].request = 0;
+ blk_shadow_free = id;
}
@@ -138,41 +139,31 @@
#define DISABLE_SCATTERGATHER() (sg_operation = -1)
#endif
-static inline void translate_req_to_pfn(blkif_request_t *xreq,
- blkif_request_t *req)
+static inline void pickle_request(struct blk_shadow *s, blkif_request_t *r)
{
+#ifndef CONFIG_XEN_BLKDEV_GRANT
int i;
+#endif
- xreq->operation = req->operation;
- xreq->nr_segments = req->nr_segments;
- xreq->device = req->device;
- /* preserve id */
- xreq->sector_number = req->sector_number;
+ s->req = *r;
- for ( i = 0; i < req->nr_segments; i++ )
-#ifdef CONFIG_XEN_BLKDEV_GRANT
- xreq->frame_and_sects[i] = req->frame_and_sects[i];
-#else
- xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+#ifndef CONFIG_XEN_BLKDEV_GRANT
+ for ( i = 0; i < r->nr_segments; i++ )
+ s->req.frame_and_sects[i] = machine_to_phys(r->frame_and_sects[i]);
#endif
}
-static inline void translate_req_to_mfn(blkif_request_t *xreq,
- blkif_request_t *req)
+static inline void unpickle_request(blkif_request_t *r, struct blk_shadow *s)
{
+#ifndef CONFIG_XEN_BLKDEV_GRANT
int i;
+#endif
- xreq->operation = req->operation;
- xreq->nr_segments = req->nr_segments;
- xreq->device = req->device;
- xreq->id = req->id; /* copy id (unlike above) */
- xreq->sector_number = req->sector_number;
+ *r = s->req;
- for ( i = 0; i < req->nr_segments; i++ )
-#ifdef CONFIG_XEN_BLKDEV_GRANT
- xreq->frame_and_sects[i] = req->frame_and_sects[i];
-#else
- xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+#ifndef CONFIG_XEN_BLKDEV_GRANT
+ for ( i = 0; i < s->req.nr_segments; i++ )
+ r->frame_and_sects[i] = phys_to_machine(s->req.frame_and_sects[i]);
#endif
}
@@ -185,8 +176,6 @@
}
-
-
/************************** KERNEL VERSION 2.6 **************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
@@ -208,7 +197,6 @@
static void kick_pending_request_queues(void)
{
-
if ( (xlbd_blk_queue != NULL) &&
test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
{
@@ -218,7 +206,6 @@
*/
xlbd_blk_queue->request_fn(xlbd_blk_queue);
}
-
}
@@ -243,9 +230,8 @@
* When usage drops to zero it may allow more VBD updates to occur.
* Update of usage count is protected by a per-device semaphore.
*/
- if (--di->mi->usage == 0) {
+ if ( --di->mi->usage == 0 )
vbd_update();
- }
return 0;
}
@@ -259,8 +245,8 @@
DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
command, (long)argument, inode->i_rdev);
- switch (command) {
-
+ switch ( command )
+ {
case HDIO_GETGEO:
/* return ENOSYS to use defaults */
return -ENOSYS;
@@ -312,7 +298,7 @@
/* Fill out a communications ring structure. */
ring_req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
id = GET_ID_FROM_FREELIST();
- rec_ring[id].id = (unsigned long) req;
+ blk_shadow[id].request = (unsigned long)req;
ring_req->id = id;
ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
@@ -341,8 +327,12 @@
buffer_ma >> PAGE_SHIFT,
rq_data_dir(req) );
+ blk_shadow[id].frame[ring_req->nr_segments] =
+ buffer_ma >> PAGE_SHIFT;
+
ring_req->frame_and_sects[ring_req->nr_segments++] =
(((u32) ref) << 16) | (fsect << 3) | lsect;
+
#else
ring_req->frame_and_sects[ring_req->nr_segments++] =
buffer_ma | (fsect << 3) | lsect;
@@ -353,7 +343,7 @@
blk_ring.req_prod_pvt++;
/* Keep a private copy so we can reissue requests when recovering. */
- translate_req_to_pfn(&rec_ring[id], ring_req);
+ pickle_request(&blk_shadow[id], ring_req);
return 0;
}
@@ -372,8 +362,10 @@
queued = 0;
- while ((req = elv_next_request(rq)) != NULL) {
- if (!blk_fs_request(req)) {
+ while ( (req = elv_next_request(rq)) != NULL )
+ {
+ if ( !blk_fs_request(req) )
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|