# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID 317db130cbbfc1d2ff20b5a7943bbdd22cf08d9c
# Parent 531ad4bde8f249c318588f41bdbb72d0b0e0ea9c
First pass at using one block interface per device.
Signed-off-by: Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>
diff -r 531ad4bde8f2 -r 317db130cbbf
linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Mon Aug 22
10:21:18 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Mon Aug 22
14:22:59 2005
@@ -58,6 +58,21 @@
#include <asm-xen/xen-public/grant_table.h>
#include <asm-xen/gnttab.h>
+struct blkfront_info
+{
+ /* We watch the backend */
+ struct xenbus_watch watch;
+ int vdevice;
+ u16 handle;
+ int connected;
+ struct xenbus_device *dev;
+ char *backend;
+ int backend_id;
+ int grant_id;
+ blkif_front_ring_t ring;
+ unsigned int evtchn;
+};
+
typedef unsigned char byte; /* from linux/ide.h */
/* Control whether runtime update of vbds is enabled. */
@@ -68,20 +83,14 @@
#define BLKIF_STATE_CONNECTED 2
static unsigned int blkif_state = BLKIF_STATE_CLOSED;
-static unsigned int blkif_evtchn = 0;
-static unsigned int blkif_vbds = 0;
static unsigned int blkif_vbds_connected = 0;
-static blkif_front_ring_t blk_ring;
-
#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
-static domid_t rdomid = 0;
-static grant_ref_t gref_head, gref_terminal;
#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
#define GRANTREF_INVALID (1<<15)
-static int shmem_ref;
+static grant_ref_t gref_head, gref_terminal;
static struct blk_shadow {
blkif_request_t req;
@@ -138,11 +147,11 @@
}
-static inline void flush_requests(void)
+static inline void flush_requests(struct blkfront_info *info)
{
DISABLE_SCATTERGATHER();
- RING_PUSH_REQUESTS(&blk_ring);
- notify_via_evtchn(blkif_evtchn);
+ RING_PUSH_REQUESTS(&info->ring);
+ notify_via_evtchn(info->evtchn);
}
@@ -156,7 +165,7 @@
static void kick_pending_request_queues(void)
{
struct xlbd_disk_info *di;
- while ( ((di = head_waiting) != NULL) && !RING_FULL(&blk_ring) )
+ while ( ((di = head_waiting) != NULL) && !RING_FULL(&di->info->ring) )
{
head_waiting = di->next_waiting;
di->next_waiting = NULL;
@@ -242,7 +251,7 @@
return 1;
/* Fill out a communications ring structure. */
- ring_req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
+ ring_req = RING_GET_REQUEST(&di->info->ring, di->info->ring.req_prod_pvt);
id = GET_ID_FROM_FREELIST();
blk_shadow[id].request = (unsigned long)req;
@@ -268,7 +277,7 @@
gnttab_grant_foreign_access_ref(
ref,
- rdomid,
+ di->info->backend_id,
buffer_ma >> PAGE_SHIFT,
rq_data_dir(req) );
@@ -280,7 +289,7 @@
}
}
- blk_ring.req_prod_pvt++;
+ di->info->ring.req_prod_pvt++;
/* Keep a private copy so we can reissue requests when recovering. */
pickle_request(&blk_shadow[id], ring_req);
@@ -295,7 +304,7 @@
*/
void do_blkif_request(request_queue_t *rq)
{
- struct xlbd_disk_info *di;
+ struct xlbd_disk_info *di = NULL;
struct request *req;
int queued;
@@ -305,13 +314,15 @@
while ( (req = elv_next_request(rq)) != NULL )
{
+ di = req->rq_disk->private_data;
+
if ( !blk_fs_request(req) )
{
end_request(req, 0);
continue;
}
- if ( RING_FULL(&blk_ring) )
+ if ( RING_FULL(&di->info->ring) )
goto wait;
DPRINTK("do_blk_req %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
@@ -323,7 +334,6 @@
if ( blkif_queue_request(req) )
{
wait:
- di = req->rq_disk->private_data;
if ( di->next_waiting == NULL )
{
di->next_waiting = head_waiting;
@@ -338,7 +348,7 @@
}
if ( queued != 0 )
- flush_requests();
+ flush_requests(di->info);
}
@@ -347,7 +357,8 @@
struct request *req;
blkif_response_t *bret;
RING_IDX i, rp;
- unsigned long flags;
+ unsigned long flags;
+ struct blkfront_info *info = (struct blkfront_info *)dev_id;
spin_lock_irqsave(&blkif_io_lock, flags);
@@ -358,14 +369,14 @@
return IRQ_HANDLED;
}
- rp = blk_ring.sring->rsp_prod;
+ rp = info->ring.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
- for ( i = blk_ring.rsp_cons; i != rp; i++ )
+ for ( i = info->ring.rsp_cons; i != rp; i++ )
{
unsigned long id;
- bret = RING_GET_RESPONSE(&blk_ring, i);
+ bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
req = (struct request *)blk_shadow[id].request;
@@ -394,7 +405,7 @@
}
}
- blk_ring.rsp_cons = i;
+ info->ring.rsp_cons = i;
kick_pending_request_queues();
@@ -426,10 +437,10 @@
{
/* We kick pending request queues if the ring is reasonably empty. */
if ( (nr_pending != 0) &&
- (RING_PENDING_REQUESTS(&blk_ring) < (BLK_RING_SIZE >> 1)) )
+ (RING_PENDING_REQUESTS(&info->ring) < (BLK_RING_SIZE >> 1)) )
{
/* Attempt to drain the queue, but bail if the ring becomes full. */
- while ( (nr_pending != 0) && !RING_FULL(&blk_ring) )
+ while ( (nr_pending != 0) && !RING_FULL(&info->ring) )
do_blkif_request(pending_queues[--nr_pending]);
}
}
@@ -725,8 +736,8 @@
(sg_dev == device) &&
(sg_next_sect == sector_number) )
{
- req = RING_GET_REQUEST(&blk_ring,
- blk_ring.req_prod_pvt - 1);
+ req = RING_GET_REQUEST(&info->ring,
+ info->ring.req_prod_pvt - 1);
bh = (struct buffer_head *)id;
bh->b_reqnext = (struct buffer_head *)blk_shadow[req->id].request;
@@ -738,7 +749,7 @@
gnttab_grant_foreign_access_ref(
ref,
- rdomid,
+ info->backend_id,
buffer_ma >> PAGE_SHIFT,
( operation == BLKIF_OP_WRITE ? 1 : 0 ) );
@@ -757,7 +768,7 @@
return 0;
}
- else if ( RING_FULL(&blk_ring) )
+ else if ( RING_FULL(&info->ring) )
{
return 1;
}
@@ -774,7 +785,7 @@
}
/* Fill out a communications ring structure. */
- req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
+ req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
xid = GET_ID_FROM_FREELIST();
blk_shadow[xid].request = (unsigned long)id;
@@ -790,7 +801,7 @@
gnttab_grant_foreign_access_ref(
ref,
- rdomid,
+ info->backend_id,
buffer_ma >> PAGE_SHIFT,
( operation == BLKIF_OP_WRITE ? 1 : 0 ) );
@@ -801,7 +812,7 @@
/* Keep a private copy so we can reissue requests when recovering. */
pickle_request(&blk_shadow[xid], req);
- blk_ring.req_prod_pvt++;
+ info->ring.req_prod_pvt++;
return 0;
}
@@ -903,15 +914,15 @@
return;
}
- rp = blk_ring.sring->rsp_prod;
+ rp = info->ring.sring->rsp_prod;
rmb(); /* Ensure we see queued responses up to 'rp'. */
- for ( i = blk_ring.rsp_cons; i != rp; i++ )
+ for ( i = info->ring.rsp_cons; i != rp; i++ )
{
unsigned long id;
blkif_response_t *bret;
- bret = RING_GET_RESPONSE(&blk_ring, i);
+ bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
bh = (struct buffer_head *)blk_shadow[id].request;
@@ -943,7 +954,7 @@
}
}
- blk_ring.rsp_cons = i;
+ info->ring.rsp_cons = i;
kick_pending_request_queues();
@@ -954,7 +965,7 @@
/***************************** COMMON CODE *******************************/
-static void blkif_free(void)
+static void blkif_free(struct blkfront_info *info)
{
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&blkif_io_lock);
@@ -962,16 +973,16 @@
spin_unlock_irq(&blkif_io_lock);
/* Free resources associated with old device channel. */
- if ( blk_ring.sring != NULL )
- {
- free_page((unsigned long)blk_ring.sring);
- blk_ring.sring = NULL;
- }
- unbind_evtchn_from_irqhandler(blkif_evtchn, NULL);
- blkif_evtchn = 0;
-}
-
-static void blkif_recover(void)
+ if ( info->ring.sring != NULL )
+ {
+ free_page((unsigned long)info->ring.sring);
+ info->ring.sring = NULL;
+ }
+ unbind_evtchn_from_irqhandler(info->evtchn, NULL);
+ info->evtchn = 0;
+}
+
+static void blkif_recover(struct blkfront_info *info)
{
int i;
blkif_request_t *req;
@@ -987,7 +998,7 @@
memset(&blk_shadow, 0, sizeof(blk_shadow));
for ( i = 0; i < BLK_RING_SIZE; i++ )
blk_shadow[i].req.id = i+1;
- blk_shadow_free = blk_ring.req_prod_pvt;
+ blk_shadow_free = info->ring.req_prod_pvt;
blk_shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
/* Stage 3: Find pending requests and requeue them. */
@@ -999,7 +1010,7 @@
/* Grab a request slot and unpickle shadow state into it. */
req = RING_GET_REQUEST(
- &blk_ring, blk_ring.req_prod_pvt);
+ &info->ring, info->ring.req_prod_pvt);
unpickle_request(req, &copy[i]);
/* We get a new request id, and must reset the shadow state. */
@@ -1012,7 +1023,7 @@
if ( req->frame_and_sects[j] & GRANTREF_INVALID )
gnttab_grant_foreign_access_ref(
blkif_gref_from_fas(req->frame_and_sects[j]),
- rdomid,
+ info->backend_id,
blk_shadow[req->id].frame[j],
rq_data_dir((struct request *)
blk_shadow[req->id].request));
@@ -1020,32 +1031,31 @@
}
blk_shadow[req->id].req = *req;
- blk_ring.req_prod_pvt++;
+ info->ring.req_prod_pvt++;
}
kfree(copy);
recovery = 0;
- /* blk_ring->req_prod will be set when we flush_requests().*/
+ /* info->ring->req_prod will be set when we flush_requests().*/
wmb();
/* Kicks things back into life. */
- flush_requests();
+ flush_requests(info);
/* Now safe to left other people use the interface. */
blkif_state = BLKIF_STATE_CONNECTED;
}
-static void blkif_connect(u16 evtchn, domid_t domid)
+static void blkif_connect(struct blkfront_info *info, u16 evtchn)
{
int err = 0;
- blkif_evtchn = evtchn;
- rdomid = domid;
+ info->evtchn = evtchn;
err = bind_evtchn_to_irqhandler(
- blkif_evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
+ info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
if ( err != 0 )
{
WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
@@ -1057,17 +1067,6 @@
static struct xenbus_device_id blkfront_ids[] = {
{ "vbd" },
{ "" }
-};
-
-struct blkfront_info
-{
- /* We watch the backend */
- struct xenbus_watch watch;
- int vdevice;
- u16 handle;
- int connected;
- struct xenbus_device *dev;
- char *backend;
};
static void watch_for_status(struct xenbus_watch *watch, const char *node)
@@ -1094,7 +1093,7 @@
return;
}
- xlvbd_add(sectors, info->vdevice, info->handle, binfo, sector_size);
+ xlvbd_add(sectors, info->vdevice, info->handle, binfo, sector_size,
info);
info->connected = 1;
/* First to connect? blkif is now connected. */
@@ -1109,7 +1108,7 @@
spin_unlock_irq(&blkif_io_lock);
}
-static int setup_blkring(struct xenbus_device *dev, unsigned int backend_id)
+static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info)
{
blkif_sring_t *sring;
evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
@@ -1121,25 +1120,25 @@
return -ENOMEM;
}
SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&blk_ring, sring, PAGE_SIZE);
-
- shmem_ref = gnttab_claim_grant_reference(&gref_head,
- gref_terminal);
- ASSERT(shmem_ref != -ENOSPC);
- gnttab_grant_foreign_access_ref(shmem_ref,
- backend_id,
- virt_to_mfn(blk_ring.sring),
+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+ info->grant_id = gnttab_claim_grant_reference(&gref_head,
+ gref_terminal);
+ ASSERT(info->grant_id != -ENOSPC);
+ gnttab_grant_foreign_access_ref(info->grant_id,
+ info->backend_id,
+ virt_to_mfn(info->ring.sring),
0);
- op.u.alloc_unbound.dom = backend_id;
+ op.u.alloc_unbound.dom = info->backend_id;
err = HYPERVISOR_event_channel_op(&op);
if (err) {
- free_page((unsigned long)blk_ring.sring);
- blk_ring.sring = 0;
+ free_page((unsigned long)info->ring.sring);
+ info->ring.sring = 0;
xenbus_dev_error(dev, err, "allocating event channel");
return err;
}
- blkif_connect(op.u.alloc_unbound.port, backend_id);
+ blkif_connect(info, op.u.alloc_unbound.port);
return 0;
}
@@ -1149,11 +1148,11 @@
{
char *backend;
const char *message;
- int err, backend_id;
+ int err;
backend = NULL;
err = xenbus_gather(dev->nodename,
- "backend-id", "%i", &backend_id,
+ "backend-id", "%i", &info->backend_id,
"backend", NULL, &backend,
NULL);
if (XENBUS_EXIST_ERR(err))
@@ -1168,12 +1167,10 @@
goto out;
}
- /* First device? We create shared ring, alloc event channel. */
- if (blkif_vbds == 0) {
- err = setup_blkring(dev, backend_id);
- if (err)
- goto out;
- }
+ /* Create shared ring, alloc event channel. */
+ err = setup_blkring(dev, info);
+ if (err)
+ goto out;
err = xenbus_transaction_start(dev->nodename);
if (err) {
@@ -1181,13 +1178,13 @@
goto destroy_blkring;
}
- err = xenbus_printf(dev->nodename, "grant-id","%u", shmem_ref);
+ err = xenbus_printf(dev->nodename, "grant-id","%u", info->grant_id);
if (err) {
message = "writing grant-id";
goto abort_transaction;
}
err = xenbus_printf(dev->nodename,
- "event-channel", "%u", blkif_evtchn);
+ "event-channel", "%u", info->evtchn);
if (err) {
message = "writing event-channel";
goto abort_transaction;
@@ -1220,8 +1217,7 @@
/* Have to do this *outside* transaction. */
xenbus_dev_error(dev, err, "%s", message);
destroy_blkring:
- if (blkif_vbds == 0)
- blkif_free();
+ blkif_free(info);
goto out;
}
@@ -1266,7 +1262,6 @@
/* Call once in case entries already there. */
watch_for_status(&info->watch, info->watch.node);
- blkif_vbds++;
return 0;
}
@@ -1281,11 +1276,11 @@
xlvbd_del(info->handle);
blkif_vbds_connected--;
}
+
+ blkif_free(info);
+
kfree(info->backend);
kfree(info);
-
- if (--blkif_vbds == 0)
- blkif_free();
return 0;
}
@@ -1298,10 +1293,8 @@
kfree(info->backend);
info->backend = NULL;
- if (--blkif_vbds == 0) {
- recovery = 1;
- blkif_free();
- }
+ recovery = 1;
+ blkif_free(info);
return 0;
}
@@ -1314,8 +1307,7 @@
/* FIXME: Check geometry hasn't changed here... */
err = talk_to_backend(dev, info);
if (!err) {
- if (blkif_vbds++ == 0)
- blkif_recover();
+ blkif_recover(info);
}
return err;
}
@@ -1363,14 +1355,14 @@
{
int i;
+ if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
+ (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
+ return 0;
+
/* A grant for every ring slot, plus one for the ring itself. */
if (gnttab_alloc_grant_references(MAXIMUM_OUTSTANDING_BLOCK_REQS + 1,
&gref_head, &gref_terminal) < 0)
return 1;
-
- if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
- (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
- return 0;
IPRINTK("Initialising virtual block device driver\n");
diff -r 531ad4bde8f2 -r 317db130cbbf
linux-2.6-xen-sparse/drivers/xen/blkfront/block.h
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Mon Aug 22 14:22:59 2005
@@ -79,6 +79,8 @@
#define DPRINTK_IOCTL(_f, _a...) ((void)0)
#endif
+struct blkfront_info;
+
struct xlbd_type_info {
int partn_shift;
int disks_per_major;
@@ -106,6 +108,7 @@
struct xlbd_disk_info *next_waiting;
request_queue_t *rq;
#endif
+ struct blkfront_info *info;
};
typedef struct xen_block {
@@ -124,6 +127,6 @@
/* Virtual block-device subsystem. */
int xlvbd_add(blkif_sector_t capacity, int device, blkif_vdev_t handle,
- u16 info, u16 sector_size);
+ u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
void xlvbd_del(blkif_vdev_t handle);
#endif /* __XEN_DRIVERS_BLOCK_H__ */
diff -r 531ad4bde8f2 -r 317db130cbbf
linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c Mon Aug 22 10:21:18 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c Mon Aug 22 14:22:59 2005
@@ -222,7 +222,8 @@
static struct gendisk *xlvbd_alloc_gendisk(
struct xlbd_major_info *mi, int minor, blkif_sector_t capacity,
- int device, blkif_vdev_t handle, u16 info, u16 sector_size)
+ int device, blkif_vdev_t handle, u16 vdisk_info, u16 sector_size,
+ struct blkfront_info *info)
{
struct gendisk *gd;
struct xlbd_disk_info *di;
@@ -235,6 +236,7 @@
di->mi = mi;
di->xd_device = device;
di->handle = handle;
+ di->info = info;
if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
nr_minors = 1 << mi->type->partn_shift;
@@ -266,13 +268,13 @@
di->rq = gd->queue;
- if (info & VDISK_READONLY)
+ if (vdisk_info & VDISK_READONLY)
set_disk_ro(gd, 1);
- if (info & VDISK_REMOVABLE)
+ if (vdisk_info & VDISK_REMOVABLE)
gd->flags |= GENHD_FL_REMOVABLE;
- if (info & VDISK_CDROM)
+ if (vdisk_info & VDISK_CDROM)
gd->flags |= GENHD_FL_CD;
add_disk(gd);
@@ -285,7 +287,7 @@
}
int xlvbd_add(blkif_sector_t capacity, int device, blkif_vdev_t handle,
- u16 info, u16 sector_size)
+ u16 vdisk_info, u16 sector_size, struct blkfront_info *info)
{
struct lvdisk *new;
struct block_device *bd;
@@ -300,7 +302,7 @@
if (new == NULL)
return -ENOMEM;
new->capacity = capacity;
- new->info = info;
+ new->info = vdisk_info;
new->handle = handle;
new->dev = MKDEV(MAJOR_XEN(device), MINOR_XEN(device));
@@ -309,7 +311,7 @@
goto out;
gd = xlvbd_alloc_gendisk(mi, MINOR_XEN(device), capacity, device, handle,
- info, sector_size);
+ vdisk_info, sector_size, info);
if (gd == NULL)
goto out_bd;
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
|