xen-changelog

[Xen-changelog] merge?

# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID ffbc98d735bdb058a8cff968980ea276eddce146
# Parent  7cccdb49af7537b42b0df1d879713957a4bc51b0
# Parent  7fbaf67a0af5048f94640ea0e44b88408b21b791
merge?

diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Fri Sep 16 18:07:50 2005
@@ -45,12 +45,12 @@
        return 0;
 }
 
-int direct_remap_pfn_range(struct mm_struct *mm,
-                           unsigned long address, 
-                           unsigned long mfn,
-                           unsigned long size, 
-                           pgprot_t prot,
-                           domid_t  domid)
+static int __direct_remap_pfn_range(struct mm_struct *mm,
+                                   unsigned long address, 
+                                   unsigned long mfn,
+                                   unsigned long size, 
+                                   pgprot_t prot,
+                                   domid_t  domid)
 {
        int i;
        unsigned long start_address;
@@ -98,6 +98,20 @@
        return 0;
 }
 
+int direct_remap_pfn_range(struct vm_area_struct *vma,
+                          unsigned long address, 
+                          unsigned long mfn,
+                          unsigned long size, 
+                          pgprot_t prot,
+                          domid_t  domid)
+{
+       /* Same as remap_pfn_range(). */
+       vma->vm_flags |= VM_IO | VM_RESERVED;
+
+       return __direct_remap_pfn_range(
+               vma->vm_mm, address, mfn, size, prot, domid);
+}
+
 EXPORT_SYMBOL(direct_remap_pfn_range);
 
 
@@ -221,8 +235,9 @@
 #ifdef __x86_64__
        flags |= _PAGE_USER;
 #endif
-       if (direct_remap_pfn_range(&init_mm, (unsigned long) addr, phys_addr>>PAGE_SHIFT,
-                                   size, __pgprot(flags), domid)) {
+       if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
+                                    phys_addr>>PAGE_SHIFT,
+                                    size, __pgprot(flags), domid)) {
                vunmap((void __force *) addr);
                return NULL;
        }
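
The upshot of this hunk: direct_remap_pfn_range() now takes the vma itself and marks it VM_IO | VM_RESERVED centrally, just as remap_pfn_range() does, while the static __direct_remap_pfn_range() keeps the mm-based signature for kernel mappings such as the ioremap path above. A minimal sketch of a driver mmap handler under the new convention (mydev_mmap is a hypothetical name; the call pattern is the one the hunks below adopt):

        /* Sketch only; mydev_mmap is illustrative, not part of this patch. */
        static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
        {
                /* The wrapper sets VM_IO | VM_RESERVED on the vma for us,
                 * so callers no longer adjust vma->vm_flags by hand. */
                if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                           vma->vm_end - vma->vm_start,
                                           vma->vm_page_prot, DOMID_IO))
                        return -EAGAIN;
                return 0;
        }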
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/arch/xen/i386/pci/i386.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/pci/i386.c     Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/pci/i386.c     Fri Sep 16 18:07:50 2005
@@ -295,7 +295,7 @@
        /* Write-combine setting is ignored, it is changed via the mtrr
         * interfaces on this platform.
         */
-       if (direct_remap_pfn_range(vma->vm_mm, vma->vm_start, vma->vm_pgoff,
+       if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot, DOMID_IO))
                return -EAGAIN;
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/arch/xen/kernel/devmem.c
--- a/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c     Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c     Fri Sep 16 18:07:50 2005
@@ -90,22 +90,10 @@
 
 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 {
-       int uncached;
-
-       uncached = uncached_access(file);
-       if (uncached)
+       if (uncached_access(file))
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-       /* Don't try to swap out physical pages.. */
-       vma->vm_flags |= VM_RESERVED;
-
-       /*
-        * Don't dump addresses that are not real memory to a core file.
-        */
-       if (uncached)
-               vma->vm_flags |= VM_IO;
-
-       if (direct_remap_pfn_range(vma->vm_mm, vma->vm_start, vma->vm_pgoff,
+       if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot, DOMID_IO))
                return -EAGAIN;
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/drivers/xen/netback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Fri Sep 16 18:07:50 2005
@@ -18,16 +18,10 @@
 #include <asm-xen/xen-public/io/netif.h>
 #include <asm/io.h>
 #include <asm/pgalloc.h>
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
 #include <asm-xen/xen-public/grant_table.h>
 #include <asm-xen/gnttab.h>
 
 #define GRANT_INVALID_REF (0xFFFF)
-
-#endif
-
-
 
 #if 0
 #define ASSERT(_p) \
@@ -44,74 +38,73 @@
 #define WPRINTK(fmt, args...) \
     printk(KERN_WARNING "xen_net: " fmt, ##args)
 
+typedef struct netif_st {
+       /* Unique identifier for this interface. */
+       domid_t          domid;
+       unsigned int     handle;
 
-typedef struct netif_st {
-    /* Unique identifier for this interface. */
-    domid_t          domid;
-    unsigned int     handle;
+       u8               fe_dev_addr[6];
 
-    u8               fe_dev_addr[6];
+       /* Physical parameters of the comms window. */
+       unsigned long    tx_shmem_frame;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       u16              tx_shmem_handle;
+       unsigned long    tx_shmem_vaddr; 
+       grant_ref_t      tx_shmem_ref; 
+#endif
+       unsigned long    rx_shmem_frame;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       u16              rx_shmem_handle;
+       unsigned long    rx_shmem_vaddr; 
+       grant_ref_t      rx_shmem_ref; 
+#endif
+       unsigned int     evtchn;
+       unsigned int     remote_evtchn;
 
-    /* Physical parameters of the comms window. */
-    unsigned long    tx_shmem_frame;
+       /* The shared rings and indexes. */
+       netif_tx_interface_t *tx;
+       netif_rx_interface_t *rx;
+
+       /* Private indexes into shared ring. */
+       NETIF_RING_IDX rx_req_cons;
+       NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    u16              tx_shmem_handle;
-    unsigned long    tx_shmem_vaddr; 
-    grant_ref_t      tx_shmem_ref; 
+       NETIF_RING_IDX rx_resp_prod_copy;
 #endif
-    unsigned long    rx_shmem_frame;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    u16              rx_shmem_handle;
-    unsigned long    rx_shmem_vaddr; 
-    grant_ref_t      rx_shmem_ref; 
-#endif
-    unsigned int     evtchn;
-    unsigned int     remote_evtchn;
+       NETIF_RING_IDX tx_req_cons;
+       NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
 
-    /* The shared rings and indexes. */
-    netif_tx_interface_t *tx;
-    netif_rx_interface_t *rx;
+       /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+       unsigned long   credit_bytes;
+       unsigned long   credit_usec;
+       unsigned long   remaining_credit;
+       struct timer_list credit_timeout;
 
-    /* Private indexes into shared ring. */
-    NETIF_RING_IDX rx_req_cons;
-    NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    NETIF_RING_IDX rx_resp_prod_copy; /* private version of shared variable */
-#endif
-    NETIF_RING_IDX tx_req_cons;
-    NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
+       /* Miscellaneous private stuff. */
+       enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+       int active;
+       struct list_head list;  /* scheduling list */
+       atomic_t         refcnt;
+       struct net_device *dev;
+       struct net_device_stats stats;
 
-    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-    unsigned long   credit_bytes;
-    unsigned long   credit_usec;
-    unsigned long   remaining_credit;
-    struct timer_list credit_timeout;
-
-    /* Miscellaneous private stuff. */
-    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-    int active;
-    struct list_head list;  /* scheduling list */
-    atomic_t         refcnt;
-    struct net_device *dev;
-    struct net_device_stats stats;
-
-    struct work_struct free_work;
+       struct work_struct free_work;
 } netif_t;
 
 void netif_creditlimit(netif_t *netif);
 int  netif_disconnect(netif_t *netif);
 
 netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]);
-void free_netif_callback(netif_t *netif);
+void free_netif(netif_t *netif);
 int netif_map(netif_t *netif, unsigned long tx_ring_ref,
              unsigned long rx_ring_ref, unsigned int evtchn);
 
 #define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define netif_put(_b)                             \
-    do {                                          \
-        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-            free_netif_callback(_b);              \
-    } while (0)
+#define netif_put(_b)                                          \
+       do {                                                    \
+               if ( atomic_dec_and_test(&(_b)->refcnt) )       \
+                       free_netif(_b);                         \
+       } while (0)
 
 void netif_xenbus_init(void);
 
@@ -123,3 +116,13 @@
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 
 #endif /* __NETIF__BACKEND__COMMON_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
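
Note the rename here: the last reference dropped through netif_put() now calls free_netif(), which (see interface.c below) merely schedules the real teardown. A hedged sketch of the intended refcount discipline; example_async_op() is an illustrative name, not part of this patch:

        /* Sketch only; shows the netif_get()/netif_put() pairing. */
        static void example_async_op(netif_t *netif)
        {
                netif_get(netif);  /* pin the interface across async work */
                /* ... hand netif to a tasklet/timer that dereferences it ... */
                netif_put(netif);  /* the final put ends up in free_netif() */
        }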
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Fri Sep 16 18:07:50 2005
@@ -11,104 +11,105 @@
 
 static void __netif_up(netif_t *netif)
 {
-    struct net_device *dev = netif->dev;
-    spin_lock_bh(&dev->xmit_lock);
-    netif->active = 1;
-    spin_unlock_bh(&dev->xmit_lock);
-    (void)bind_evtchn_to_irqhandler(
-        netif->evtchn, netif_be_int, 0, dev->name, netif);
-    netif_schedule_work(netif);
+       struct net_device *dev = netif->dev;
+       spin_lock_bh(&dev->xmit_lock);
+       netif->active = 1;
+       spin_unlock_bh(&dev->xmit_lock);
+       (void)bind_evtchn_to_irqhandler(
+               netif->evtchn, netif_be_int, 0, dev->name, netif);
+       netif_schedule_work(netif);
 }
 
 static void __netif_down(netif_t *netif)
 {
-    struct net_device *dev = netif->dev;
-    spin_lock_bh(&dev->xmit_lock);
-    netif->active = 0;
-    spin_unlock_bh(&dev->xmit_lock);
-    unbind_evtchn_from_irqhandler(netif->evtchn, netif);
-    netif_deschedule_work(netif);
+       struct net_device *dev = netif->dev;
+       spin_lock_bh(&dev->xmit_lock);
+       netif->active = 0;
+       spin_unlock_bh(&dev->xmit_lock);
+       unbind_evtchn_from_irqhandler(netif->evtchn, netif);
+       netif_deschedule_work(netif);
 }
 
 static int net_open(struct net_device *dev)
 {
-    netif_t *netif = netdev_priv(dev);
-    if (netif->status == CONNECTED)
-        __netif_up(netif);
-    netif_start_queue(dev);
-    return 0;
+       netif_t *netif = netdev_priv(dev);
+       if (netif->status == CONNECTED)
+               __netif_up(netif);
+       netif_start_queue(dev);
+       return 0;
 }
 
 static int net_close(struct net_device *dev)
 {
-    netif_t *netif = netdev_priv(dev);
-    netif_stop_queue(dev);
-    if (netif->status == CONNECTED)
-        __netif_down(netif);
-    return 0;
+       netif_t *netif = netdev_priv(dev);
+       netif_stop_queue(dev);
+       if (netif->status == CONNECTED)
+               __netif_down(netif);
+       return 0;
 }
 
 netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN])
 {
-    int err = 0, i;
-    struct net_device *dev;
-    netif_t *netif;
-    char name[IFNAMSIZ] = {};
-
-    snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-    dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
-    if (dev == NULL) {
-        DPRINTK("Could not create netif: out of memory\n");
-        return NULL;
-    }
-
-    netif = netdev_priv(dev);
-    memset(netif, 0, sizeof(*netif));
-    netif->domid  = domid;
-    netif->handle = handle;
-    netif->status = DISCONNECTED;
-    atomic_set(&netif->refcnt, 0);
-    netif->dev = dev;
-
-    netif->credit_bytes = netif->remaining_credit = ~0UL;
-    netif->credit_usec  = 0UL;
-    init_timer(&netif->credit_timeout);
-
-    dev->hard_start_xmit = netif_be_start_xmit;
-    dev->get_stats       = netif_be_get_stats;
-    dev->open            = net_open;
-    dev->stop            = net_close;
-    dev->features        = NETIF_F_NO_CSUM;
-
-    /* Disable queuing. */
-    dev->tx_queue_len = 0;
-
-    for (i = 0; i < ETH_ALEN; i++)
-       if (be_mac[i] != 0)
-           break;
-    if (i == ETH_ALEN) {
-        /*
-         * Initialise a dummy MAC address. We choose the numerically largest
-         * non-broadcast address to prevent the address getting stolen by an
-         * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
-         */ 
-        memset(dev->dev_addr, 0xFF, ETH_ALEN);
-        dev->dev_addr[0] &= ~0x01;
-    } else
-        memcpy(dev->dev_addr, be_mac, ETH_ALEN);
-
-    rtnl_lock();
-    err = register_netdevice(dev);
-    rtnl_unlock();
-    if (err) {
-        DPRINTK("Could not register new net device %s: err=%d\n",
-                dev->name, err);
-        free_netdev(dev);
-        return NULL;
-    }
-
-    DPRINTK("Successfully created netif\n");
-    return netif;
+       int err = 0, i;
+       struct net_device *dev;
+       netif_t *netif;
+       char name[IFNAMSIZ] = {};
+
+       snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+       dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
+       if (dev == NULL) {
+               DPRINTK("Could not create netif: out of memory\n");
+               return NULL;
+       }
+
+       netif = netdev_priv(dev);
+       memset(netif, 0, sizeof(*netif));
+       netif->domid  = domid;
+       netif->handle = handle;
+       netif->status = DISCONNECTED;
+       atomic_set(&netif->refcnt, 0);
+       netif->dev = dev;
+
+       netif->credit_bytes = netif->remaining_credit = ~0UL;
+       netif->credit_usec  = 0UL;
+       init_timer(&netif->credit_timeout);
+
+       dev->hard_start_xmit = netif_be_start_xmit;
+       dev->get_stats       = netif_be_get_stats;
+       dev->open            = net_open;
+       dev->stop            = net_close;
+       dev->features        = NETIF_F_NO_CSUM;
+
+       /* Disable queuing. */
+       dev->tx_queue_len = 0;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               if (be_mac[i] != 0)
+                       break;
+       if (i == ETH_ALEN) {
+               /*
+                * Initialise a dummy MAC address. We choose the numerically
+                * largest non-broadcast address to prevent the address getting
+                * stolen by an Ethernet bridge for STP purposes.
+                 * (FE:FF:FF:FF:FF:FF) 
+                */ 
+               memset(dev->dev_addr, 0xFF, ETH_ALEN);
+               dev->dev_addr[0] &= ~0x01;
+       } else
+               memcpy(dev->dev_addr, be_mac, ETH_ALEN);
+
+       rtnl_lock();
+       err = register_netdevice(dev);
+       rtnl_unlock();
+       if (err) {
+               DPRINTK("Could not register new net device %s: err=%d\n",
+                       dev->name, err);
+               free_netdev(dev);
+               return NULL;
+       }
+
+       DPRINTK("Successfully created netif\n");
+       return netif;
 }
 
 static int map_frontend_pages(netif_t *netif, unsigned long localaddr,
@@ -116,191 +117,204 @@
                               unsigned long rx_ring_ref)
 {
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    struct gnttab_map_grant_ref op;
-
-    /* Map: Use the Grant table reference */
-    op.host_addr = localaddr;
-    op.flags     = GNTMAP_host_map;
-    op.ref       = tx_ring_ref;
-    op.dom       = netif->domid;
+       struct gnttab_map_grant_ref op;
+
+       /* Map: Use the Grant table reference */
+       op.host_addr = localaddr;
+       op.flags     = GNTMAP_host_map;
+       op.ref       = tx_ring_ref;
+       op.dom       = netif->domid;
     
-    BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
-    if (op.handle < 0) { 
-        DPRINTK(" Grant table operation failure mapping tx_ring_ref!\n");
-        return op.handle;
-    }
-
-    netif->tx_shmem_ref    = tx_ring_ref;
-    netif->tx_shmem_handle = op.handle;
-    netif->tx_shmem_vaddr  = localaddr;
-
-    /* Map: Use the Grant table reference */
-    op.host_addr = localaddr + PAGE_SIZE;
-    op.flags     = GNTMAP_host_map;
-    op.ref       = rx_ring_ref;
-    op.dom       = netif->domid;
-
-    BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
-    if (op.handle < 0) { 
-        DPRINTK(" Grant table operation failure mapping rx_ring_ref!\n");
-        return op.handle;
-    }
-
-    netif->rx_shmem_ref    = rx_ring_ref;
-    netif->rx_shmem_handle = op.handle;
-    netif->rx_shmem_vaddr  = localaddr + PAGE_SIZE;
+       BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
+       if (op.handle < 0) { 
+               DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
+               return op.handle;
+       }
+
+       netif->tx_shmem_ref    = tx_ring_ref;
+       netif->tx_shmem_handle = op.handle;
+       netif->tx_shmem_vaddr  = localaddr;
+
+       /* Map: Use the Grant table reference */
+       op.host_addr = localaddr + PAGE_SIZE;
+       op.flags     = GNTMAP_host_map;
+       op.ref       = rx_ring_ref;
+       op.dom       = netif->domid;
+
+       BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
+       if (op.handle < 0) { 
+               DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
+               return op.handle;
+       }
+
+       netif->rx_shmem_ref    = rx_ring_ref;
+       netif->rx_shmem_handle = op.handle;
+       netif->rx_shmem_vaddr  = localaddr + PAGE_SIZE;
 
 #else
-    pgprot_t      prot = __pgprot(_KERNPG_TABLE);
-    int           err;
-
-    err = direct_remap_pfn_range(&init_mm, localaddr,
-                                 tx_ring_ref, PAGE_SIZE,
-                                 prot, netif->domid); 
+       pgprot_t prot = __pgprot(_KERNPG_TABLE);
+       int      err;
+
+       err = direct_remap_pfn_range(
+               &init_mm, localaddr,
+               tx_ring_ref, PAGE_SIZE,
+               prot, netif->domid); 
     
-    err |= direct_remap_pfn_range(&init_mm, localaddr + PAGE_SIZE,
-                                 rx_ring_ref, PAGE_SIZE,
-                                 prot, netif->domid);
-
-    if (err)
-       return err;
+       err |= direct_remap_pfn_range(
+               &init_mm, localaddr + PAGE_SIZE,
+               rx_ring_ref, PAGE_SIZE,
+               prot, netif->domid);
+
+       if (err)
+               return err;
 #endif
 
-    return 0;
+       return 0;
 }
 
 static void unmap_frontend_pages(netif_t *netif)
 {
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    struct gnttab_unmap_grant_ref op;
-
-    op.host_addr    = netif->tx_shmem_vaddr;
-    op.handle       = netif->tx_shmem_handle;
-    op.dev_bus_addr = 0;
-    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
-
-    op.host_addr    = netif->rx_shmem_vaddr;
-    op.handle       = netif->rx_shmem_handle;
-    op.dev_bus_addr = 0;
-    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+       struct gnttab_unmap_grant_ref op;
+
+       op.host_addr    = netif->tx_shmem_vaddr;
+       op.handle       = netif->tx_shmem_handle;
+       op.dev_bus_addr = 0;
+       BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+
+       op.host_addr    = netif->rx_shmem_vaddr;
+       op.handle       = netif->rx_shmem_handle;
+       op.dev_bus_addr = 0;
+       BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
 #endif
 
-    return; 
+       return; 
 }
 
 int netif_map(netif_t *netif, unsigned long tx_ring_ref,
              unsigned long rx_ring_ref, unsigned int evtchn)
 {
-    struct vm_struct *vma;
-    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
-    int err;
-
-    vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP);
-    if (vma == NULL)
-        return -ENOMEM;
-
-    err = map_frontend_pages(netif, (unsigned long)vma->addr, tx_ring_ref,
-                             rx_ring_ref);
-    if (err) {
-        vfree(vma->addr);
-       return err;
-    }
-
-    op.u.bind_interdomain.dom1 = DOMID_SELF;
-    op.u.bind_interdomain.dom2 = netif->domid;
-    op.u.bind_interdomain.port1 = 0;
-    op.u.bind_interdomain.port2 = evtchn;
-    err = HYPERVISOR_event_channel_op(&op);
-    if (err) {
-       unmap_frontend_pages(netif);
-       vfree(vma->addr);
-       return err;
-    }
-
-    netif->evtchn = op.u.bind_interdomain.port1;
-    netif->remote_evtchn = evtchn;
-
-    netif->tx = (netif_tx_interface_t *)vma->addr;
-    netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
-    netif->tx->resp_prod = netif->rx->resp_prod = 0;
-    netif_get(netif);
-    wmb(); /* Other CPUs see new state before interface is started. */
-
-    rtnl_lock();
-    netif->status = CONNECTED;
-    wmb();
-    if (netif_running(netif->dev))
-        __netif_up(netif);
-    rtnl_unlock();
-
-    return 0;
-}
-
-static void free_netif(void *arg)
-{
-    evtchn_op_t op = { .cmd = EVTCHNOP_close };
-    netif_t *netif = (netif_t *)arg;
-
-    /*
-     * These can't be done in netif_disconnect() because at that point there
-     * may be outstanding requests in the network stack whose asynchronous
-     * responses must still be notified to the remote driver.
-     */
-
-    op.u.close.port = netif->evtchn;
-    op.u.close.dom = DOMID_SELF;
-    HYPERVISOR_event_channel_op(&op);
-    op.u.close.port = netif->remote_evtchn;
-    op.u.close.dom = netif->domid;
-    HYPERVISOR_event_channel_op(&op);
-
-    unregister_netdev(netif->dev);
-
-    if (netif->tx) {
-       unmap_frontend_pages(netif);
-       vfree(netif->tx); /* Frees netif->rx as well. */
-    }
-
-    free_netdev(netif->dev);
-}
-
-void free_netif_callback(netif_t *netif)
-{
-    INIT_WORK(&netif->free_work, free_netif, (void *)netif);
-    schedule_work(&netif->free_work);
+       struct vm_struct *vma;
+       evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
+       int err;
+
+       vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP);
+       if (vma == NULL)
+               return -ENOMEM;
+
+       err = map_frontend_pages(
+               netif, (unsigned long)vma->addr, tx_ring_ref, rx_ring_ref);
+       if (err) {
+               vfree(vma->addr);
+               return err;
+       }
+
+       op.u.bind_interdomain.dom1 = DOMID_SELF;
+       op.u.bind_interdomain.dom2 = netif->domid;
+       op.u.bind_interdomain.port1 = 0;
+       op.u.bind_interdomain.port2 = evtchn;
+       err = HYPERVISOR_event_channel_op(&op);
+       if (err) {
+               unmap_frontend_pages(netif);
+               vfree(vma->addr);
+               return err;
+       }
+
+       netif->evtchn = op.u.bind_interdomain.port1;
+       netif->remote_evtchn = evtchn;
+
+       netif->tx = (netif_tx_interface_t *)vma->addr;
+       netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
+       netif->tx->resp_prod = netif->rx->resp_prod = 0;
+       netif_get(netif);
+       wmb(); /* Other CPUs see new state before interface is started. */
+
+       rtnl_lock();
+       netif->status = CONNECTED;
+       wmb();
+       if (netif_running(netif->dev))
+               __netif_up(netif);
+       rtnl_unlock();
+
+       return 0;
+}
+
+static void free_netif_callback(void *arg)
+{
+       evtchn_op_t op = { .cmd = EVTCHNOP_close };
+       netif_t *netif = (netif_t *)arg;
+
+       /*
+        * These can't be done in netif_disconnect() because at that point
+        * there may be outstanding requests in the network stack whose
+        * asynchronous responses must still be notified to the remote driver.
+        */
+
+       op.u.close.port = netif->evtchn;
+       op.u.close.dom = DOMID_SELF;
+       HYPERVISOR_event_channel_op(&op);
+       op.u.close.port = netif->remote_evtchn;
+       op.u.close.dom = netif->domid;
+       HYPERVISOR_event_channel_op(&op);
+
+       unregister_netdev(netif->dev);
+
+       if (netif->tx) {
+               unmap_frontend_pages(netif);
+               vfree(netif->tx); /* Frees netif->rx as well. */
+       }
+
+       free_netdev(netif->dev);
+}
+
+void free_netif(netif_t *netif)
+{
+       INIT_WORK(&netif->free_work, free_netif_callback, (void *)netif);
+       schedule_work(&netif->free_work);
 }
 
 void netif_creditlimit(netif_t *netif)
 {
 #if 0
-    /* Set the credit limit (reset remaining credit to new limit). */
-    netif->credit_bytes = netif->remaining_credit = creditlimit->credit_bytes;
-    netif->credit_usec = creditlimit->period_usec;
-
-    if (netif->status == CONNECTED) {
-        /*
-         * Schedule work so that any packets waiting under previous credit 
-         * limit are dealt with (acts like a replenishment point).
-         */
-        netif->credit_timeout.expires = jiffies;
-        netif_schedule_work(netif);
-    }
+       /* Set the credit limit (reset remaining credit to new limit). */
+       netif->credit_bytes     = creditlimit->credit_bytes;
+       netif->remaining_credit = creditlimit->credit_bytes;
+       netif->credit_usec      = creditlimit->period_usec;
+
+       if (netif->status == CONNECTED) {
+               /*
+                * Schedule work so that any packets waiting under previous
+                * credit limit are dealt with (acts as a replenishment point).
+                */
+               netif->credit_timeout.expires = jiffies;
+               netif_schedule_work(netif);
+       }
 #endif
 }
 
 int netif_disconnect(netif_t *netif)
 {
 
-    if (netif->status == CONNECTED) {
-        rtnl_lock();
-        netif->status = DISCONNECTING;
-        wmb();
-        if (netif_running(netif->dev))
-            __netif_down(netif);
-        rtnl_unlock();
-        netif_put(netif);
-        return 0; /* Caller should not send response message. */
-    }
-
-    return 1;
-}
+       if (netif->status == CONNECTED) {
+               rtnl_lock();
+               netif->status = DISCONNECTING;
+               wmb();
+               if (netif_running(netif->dev))
+                       __netif_down(netif);
+               rtnl_unlock();
+               netif_put(netif);
+               return 0; /* Caller should not send response message. */
+       }
+
+       return 1;
+}
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
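
The function names in this file swap roles: free_netif() is now the entry point that is safe from atomic context (it is what netif_put() invokes), while free_netif_callback() does the actual teardown from a workqueue, where unregister_netdev() and vfree() are allowed to sleep. A hedged sketch of the same deferral idiom, using the two-argument 2.6-era INIT_WORK(); my_obj, my_teardown and my_release are illustrative names:

        /* Sketch only; mirrors the free_netif()/free_netif_callback() split. */
        struct my_obj {
                struct work_struct work;
                /* ... resources needing process-context teardown ... */
        };

        static void my_teardown(void *arg)          /* runs from keventd */
        {
                struct my_obj *obj = arg;
                /* process context: may sleep, unregister, vfree, ... */
        }

        static void my_release(struct my_obj *obj)  /* atomic context OK */
        {
                INIT_WORK(&obj->work, my_teardown, obj);
                schedule_work(&obj->work);
        }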
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Fri Sep 16 18:07:50 2005
@@ -57,8 +57,8 @@
 #define PKT_PROT_LEN 64
 
 static struct {
-    netif_tx_request_t req;
-    netif_t *netif;
+       netif_tx_request_t req;
+       netif_t *netif;
 } pending_tx_info[MAX_PENDING_REQS];
 static u16 pending_ring[MAX_PENDING_REQS];
 typedef unsigned int PEND_RING_IDX;
@@ -91,49 +91,49 @@
 
 static unsigned long alloc_mfn(void)
 {
-    unsigned long mfn = 0, flags;
-    struct xen_memory_reservation reservation = {
-        .extent_start = mfn_list,
-        .nr_extents   = MAX_MFN_ALLOC,
-        .extent_order = 0,
-        .domid        = DOMID_SELF
-    };
-    spin_lock_irqsave(&mfn_lock, flags);
-    if ( unlikely(alloc_index == 0) )
-        alloc_index = HYPERVISOR_memory_op(
-            XENMEM_increase_reservation, &reservation);
-    if ( alloc_index != 0 )
-        mfn = mfn_list[--alloc_index];
-    spin_unlock_irqrestore(&mfn_lock, flags);
-    return mfn;
+       unsigned long mfn = 0, flags;
+       struct xen_memory_reservation reservation = {
+               .extent_start = mfn_list,
+               .nr_extents   = MAX_MFN_ALLOC,
+               .extent_order = 0,
+               .domid        = DOMID_SELF
+       };
+       spin_lock_irqsave(&mfn_lock, flags);
+       if ( unlikely(alloc_index == 0) )
+               alloc_index = HYPERVISOR_memory_op(
+                       XENMEM_increase_reservation, &reservation);
+       if ( alloc_index != 0 )
+               mfn = mfn_list[--alloc_index];
+       spin_unlock_irqrestore(&mfn_lock, flags);
+       return mfn;
 }
 
 #ifndef CONFIG_XEN_NETDEV_GRANT
 static void free_mfn(unsigned long mfn)
 {
-    unsigned long flags;
-    struct xen_memory_reservation reservation = {
-        .extent_start = &mfn,
-        .nr_extents   = 1,
-        .extent_order = 0,
-        .domid        = DOMID_SELF
-    };
-    spin_lock_irqsave(&mfn_lock, flags);
-    if ( alloc_index != MAX_MFN_ALLOC )
-        mfn_list[alloc_index++] = mfn;
-    else if ( HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation)
-              != 1 )
-        BUG();
-    spin_unlock_irqrestore(&mfn_lock, flags);
+       unsigned long flags;
+       struct xen_memory_reservation reservation = {
+               .extent_start = &mfn,
+               .nr_extents   = 1,
+               .extent_order = 0,
+               .domid        = DOMID_SELF
+       };
+       spin_lock_irqsave(&mfn_lock, flags);
+       if ( alloc_index != MAX_MFN_ALLOC )
+               mfn_list[alloc_index++] = mfn;
+       else
+               BUG_ON(HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                           &reservation) != 1);
+       spin_unlock_irqrestore(&mfn_lock, flags);
 }
 #endif
 
 static inline void maybe_schedule_tx_action(void)
 {
-    smp_mb();
-    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-         !list_empty(&net_schedule_list) )
-        tasklet_schedule(&net_tx_tasklet);
+       smp_mb();
+       if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+           !list_empty(&net_schedule_list))
+               tasklet_schedule(&net_tx_tasklet);
 }
 
 /*
@@ -142,77 +142,77 @@
  */
 static inline int is_xen_skb(struct sk_buff *skb)
 {
-    extern kmem_cache_t *skbuff_cachep;
-    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
-    return (cp == skbuff_cachep);
+       extern kmem_cache_t *skbuff_cachep;
+       kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
+       return (cp == skbuff_cachep);
 }
 
 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-    netif_t *netif = netdev_priv(dev);
-
-    ASSERT(skb->dev == dev);
-
-    /* Drop the packet if the target domain has no receive buffers. */
-    if ( !netif->active || 
-         (netif->rx_req_cons == netif->rx->req_prod) ||
-         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
-        goto drop;
-
-    /*
-     * We do not copy the packet unless:
-     *  1. The data is shared; or
-     *  2. The data is not allocated from our special cache.
-     * NB. We also couldn't cope with fragmented packets, but we won't get
-     *     any because we not advertise the NETIF_F_SG feature.
-     */
-    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
-    {
-        int hlen = skb->data - skb->head;
-        struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
-        if ( unlikely(nskb == NULL) )
-            goto drop;
-        skb_reserve(nskb, hlen);
-        __skb_put(nskb, skb->len);
-        if (skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen))
-            BUG();
-        nskb->dev = skb->dev;
-        nskb->proto_csum_valid = skb->proto_csum_valid;
-        dev_kfree_skb(skb);
-        skb = nskb;
-    }
+       netif_t *netif = netdev_priv(dev);
+
+       ASSERT(skb->dev == dev);
+
+       /* Drop the packet if the target domain has no receive buffers. */
+       if (!netif->active || 
+           (netif->rx_req_cons == netif->rx->req_prod) ||
+           ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE))
+               goto drop;
+
+       /*
+        * We do not copy the packet unless:
+        *  1. The data is shared; or
+        *  2. The data is not allocated from our special cache.
+        * NB. We also couldn't cope with fragmented packets, but we won't get
+        *     any because we do not advertise the NETIF_F_SG feature.
+        */
+       if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
+               int hlen = skb->data - skb->head;
+               struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
+               if ( unlikely(nskb == NULL) )
+                       goto drop;
+               skb_reserve(nskb, hlen);
+               __skb_put(nskb, skb->len);
+               BUG_ON(skb_copy_bits(skb, -hlen, nskb->data - hlen,
+                                    skb->len + hlen));
+               nskb->dev = skb->dev;
+               nskb->proto_csum_valid = skb->proto_csum_valid;
+               dev_kfree_skb(skb);
+               skb = nskb;
+       }
 #ifdef CONFIG_XEN_NETDEV_GRANT
 #ifdef DEBUG_GRANT
-    printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d id=%04x gr=%04x\n",
-           netif->rx->req_prod,
-           netif->rx_req_cons,
-           netif->rx->ring[
-                  MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
-           netif->rx->ring[
-                  MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
-#endif
-#endif
-    netif->rx_req_cons++;
-    netif_get(netif);
-
-    skb_queue_tail(&rx_queue, skb);
-    tasklet_schedule(&net_rx_tasklet);
-
-    return 0;
+       printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d "
+              "id=%04x gr=%04x\n",
+              netif->rx->req_prod,
+              netif->rx_req_cons,
+              netif->rx->ring[
+                      MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
+              netif->rx->ring[
+                      MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
+#endif
+#endif
+       netif->rx_req_cons++;
+       netif_get(netif);
+
+       skb_queue_tail(&rx_queue, skb);
+       tasklet_schedule(&net_rx_tasklet);
+
+       return 0;
 
  drop:
-    netif->stats.tx_dropped++;
-    dev_kfree_skb(skb);
-    return 0;
+       netif->stats.tx_dropped++;
+       dev_kfree_skb(skb);
+       return 0;
 }
 
 #if 0
 static void xen_network_done_notify(void)
 {
-    static struct net_device *eth0_dev = NULL;
-    if ( unlikely(eth0_dev == NULL) )
-        eth0_dev = __dev_get_by_name("eth0");
-    netif_rx_schedule(eth0_dev);
+       static struct net_device *eth0_dev = NULL;
+       if (unlikely(eth0_dev == NULL))
+               eth0_dev = __dev_get_by_name("eth0");
+       netif_rx_schedule(eth0_dev);
 }
 /* 
  * Add following to poll() function in NAPI driver (Tigon3 is example):
@@ -221,658 +221,654 @@
  */
 int xen_network_done(void)
 {
-    return skb_queue_empty(&rx_queue);
+       return skb_queue_empty(&rx_queue);
 }
 #endif
 
 static void net_rx_action(unsigned long unused)
 {
-    netif_t *netif = NULL; 
-    s8 status;
-    u16 size, id, evtchn;
-    multicall_entry_t *mcl;
-    mmu_update_t *mmu;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    gnttab_transfer_t *gop;
-#else
-    struct mmuext_op *mmuext;
-#endif
-    unsigned long vdata, old_mfn, new_mfn;
-    struct sk_buff_head rxq;
-    struct sk_buff *skb;
-    u16 notify_list[NETIF_RX_RING_SIZE];
-    int notify_nr = 0;
-
-    skb_queue_head_init(&rxq);
-
-    mcl = rx_mcl;
-    mmu = rx_mmu;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    gop = grant_rx_op;
-#else
-    mmuext = rx_mmuext;
-#endif
-
-    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
-    {
-        netif   = netdev_priv(skb->dev);
-        vdata   = (unsigned long)skb->data;
-        old_mfn = virt_to_mfn(vdata);
-
-        /* Memory squeeze? Back off for an arbitrary while. */
-        if ( (new_mfn = alloc_mfn()) == 0 )
-        {
-            if ( net_ratelimit() )
-                WPRINTK("Memory squeeze in netback driver.\n");
-            mod_timer(&net_timer, jiffies + HZ);
-            skb_queue_head(&rx_queue, skb);
-            break;
-        }
-        /*
-         * Set the new P2M table entry before reassigning the old data page.
-         * Heed the comment in pgtable-2level.h:pte_page(). :-)
-         */
-        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
-
-        MULTI_update_va_mapping(mcl, vdata,
-                               pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
-        mcl++;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        gop->mfn = old_mfn;
-        gop->domid = netif->domid;
-        gop->ref = netif->rx->ring[
-        MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
-        netif->rx_resp_prod_copy++;
-        gop++;
-#else
-        mcl->op = __HYPERVISOR_mmuext_op;
-        mcl->args[0] = (unsigned long)mmuext;
-        mcl->args[1] = 1;
-        mcl->args[2] = 0;
-        mcl->args[3] = netif->domid;
-        mcl++;
-
-        mmuext->cmd = MMUEXT_REASSIGN_PAGE;
-        mmuext->arg1.mfn = old_mfn;
-        mmuext++;
-#endif
-        mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-        mmu->val = __pa(vdata) >> PAGE_SHIFT;  
-        mmu++;
-
-        __skb_queue_tail(&rxq, skb);
+       netif_t *netif = NULL; 
+       s8 status;
+       u16 size, id, evtchn;
+       multicall_entry_t *mcl;
+       mmu_update_t *mmu;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       gnttab_transfer_t *gop;
+#else
+       struct mmuext_op *mmuext;
+#endif
+       unsigned long vdata, old_mfn, new_mfn;
+       struct sk_buff_head rxq;
+       struct sk_buff *skb;
+       u16 notify_list[NETIF_RX_RING_SIZE];
+       int notify_nr = 0;
+
+       skb_queue_head_init(&rxq);
+
+       mcl = rx_mcl;
+       mmu = rx_mmu;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       gop = grant_rx_op;
+#else
+       mmuext = rx_mmuext;
+#endif
+
+       while ((skb = skb_dequeue(&rx_queue)) != NULL) {
+               netif   = netdev_priv(skb->dev);
+               vdata   = (unsigned long)skb->data;
+               old_mfn = virt_to_mfn(vdata);
+
+               /* Memory squeeze? Back off for an arbitrary while. */
+               if ((new_mfn = alloc_mfn()) == 0) {
+                       if ( net_ratelimit() )
+                               WPRINTK("Memory squeeze in netback driver.\n");
+                       mod_timer(&net_timer, jiffies + HZ);
+                       skb_queue_head(&rx_queue, skb);
+                       break;
+               }
+               /*
+                * Set the new P2M table entry before reassigning the old data
+                * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
+                */
+               phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] =
+                       new_mfn;
+
+               MULTI_update_va_mapping(mcl, vdata,
+                                       pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
+               mcl++;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               gop->mfn = old_mfn;
+               gop->domid = netif->domid;
+               gop->ref = netif->rx->ring[
+                       MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
+               netif->rx_resp_prod_copy++;
+               gop++;
+#else
+               mcl->op = __HYPERVISOR_mmuext_op;
+               mcl->args[0] = (unsigned long)mmuext;
+               mcl->args[1] = 1;
+               mcl->args[2] = 0;
+               mcl->args[3] = netif->domid;
+               mcl++;
+
+               mmuext->cmd = MMUEXT_REASSIGN_PAGE;
+               mmuext->arg1.mfn = old_mfn;
+               mmuext++;
+#endif
+               mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
+                       MMU_MACHPHYS_UPDATE;
+               mmu->val = __pa(vdata) >> PAGE_SHIFT;  
+               mmu++;
+
+               __skb_queue_tail(&rxq, skb);
 
 #ifdef DEBUG_GRANT
-        dump_packet('a', old_mfn, vdata);
-#endif
-        /* Filled the batch queue? */
-        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
-            break;
-    }
-
-    if ( mcl == rx_mcl )
-        return;
-
-    mcl->op = __HYPERVISOR_mmu_update;
-    mcl->args[0] = (unsigned long)rx_mmu;
-    mcl->args[1] = mmu - rx_mmu;
-    mcl->args[2] = 0;
-    mcl->args[3] = DOMID_SELF;
-    mcl++;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-#else
-    mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-#endif
-    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
-        BUG();
-
-    mcl = rx_mcl;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
-                                 gop - grant_rx_op)) { 
-        /* 
-        ** The other side has given us a bad grant ref, or has no headroom, 
-        ** or has gone away. Unfortunately the current grant table code 
-        ** doesn't inform us which is the case, so not much we can do. 
-        */
-        DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) %d "
-                "packets.\n", grant_rx_op[0].domid, gop - grant_rx_op); 
-    }
-    gop = grant_rx_op;
-#else
-    mmuext = rx_mmuext;
-#endif
-    while ( (skb = __skb_dequeue(&rxq)) != NULL )
-    {
-        netif   = netdev_priv(skb->dev);
-        size    = skb->tail - skb->data;
-
-        /* Rederive the machine addresses. */
-        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
-#else
-        old_mfn = mmuext[0].arg1.mfn;
-#endif
-        atomic_set(&(skb_shinfo(skb)->dataref), 1);
-        skb_shinfo(skb)->nr_frags = 0;
-        skb_shinfo(skb)->frag_list = NULL;
-
-        netif->stats.tx_bytes += size;
-        netif->stats.tx_packets++;
-
-        /* The update_va_mapping() must not fail. */
-        BUG_ON(mcl[0].result != 0);
-
-        /* Check the reassignment error code. */
-        status = NETIF_RSP_OKAY;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        if(gop->status != 0) { 
-            DPRINTK("Bad status %d from grant transfer to DOM%u\n", 
-                    gop->status, netif->domid);
-            /* XXX SMH: should free 'old_mfn' here */
-            status = NETIF_RSP_ERROR; 
-        } 
-#else
-        if ( unlikely(mcl[1].result != 0) )
-        {
-            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
-            free_mfn(old_mfn);
-            status = NETIF_RSP_ERROR;
-        }
-#endif
-        evtchn = netif->evtchn;
-        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
-        if ( make_rx_response(netif, id, status,
-                              (old_mfn << PAGE_SHIFT) | /* XXX */
-                              ((unsigned long)skb->data & ~PAGE_MASK),
-                              size, skb->proto_csum_valid) &&
-             (rx_notify[evtchn] == 0) )
-        {
-            rx_notify[evtchn] = 1;
-            notify_list[notify_nr++] = evtchn;
-        }
-
-        netif_put(netif);
-        dev_kfree_skb(skb);
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        mcl++;
-        gop++;
-#else
-        mcl += 2;
-        mmuext += 1;
-#endif
-    }
-
-    while ( notify_nr != 0 )
-    {
-        evtchn = notify_list[--notify_nr];
-        rx_notify[evtchn] = 0;
-        notify_via_evtchn(evtchn);
-    }
-
-  out: 
-    /* More work to do? */
-    if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
-        tasklet_schedule(&net_rx_tasklet);
+               dump_packet('a', old_mfn, vdata);
+#endif
+               /* Filled the batch queue? */
+               if ((mcl - rx_mcl) == ARRAY_SIZE(rx_mcl))
+                       break;
+       }
+
+       if (mcl == rx_mcl)
+               return;
+
+       mcl->op = __HYPERVISOR_mmu_update;
+       mcl->args[0] = (unsigned long)rx_mmu;
+       mcl->args[1] = mmu - rx_mmu;
+       mcl->args[2] = 0;
+       mcl->args[3] = DOMID_SELF;
+       mcl++;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+#else
+       mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+#endif
+       BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0);
+
+       mcl = rx_mcl;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
+                                    gop - grant_rx_op)) { 
+               /*
+                * The other side has given us a bad grant ref, or has no 
+                * headroom, or has gone away. Unfortunately the current grant
+                * table code doesn't inform us which is the case, so not much
+                * we can do. 
+                */
+               DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) "
+                       "%d packets.\n",
+                       grant_rx_op[0].domid, gop - grant_rx_op); 
+       }
+       gop = grant_rx_op;
+#else
+       mmuext = rx_mmuext;
+#endif
+       while ((skb = __skb_dequeue(&rxq)) != NULL) {
+               netif   = netdev_priv(skb->dev);
+               size    = skb->tail - skb->data;
+
+               /* Rederive the machine addresses. */
+               new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
+#else
+               old_mfn = mmuext[0].arg1.mfn;
+#endif
+               atomic_set(&(skb_shinfo(skb)->dataref), 1);
+               skb_shinfo(skb)->nr_frags = 0;
+               skb_shinfo(skb)->frag_list = NULL;
+
+               netif->stats.tx_bytes += size;
+               netif->stats.tx_packets++;
+
+               /* The update_va_mapping() must not fail. */
+               BUG_ON(mcl[0].result != 0);
+
+               /* Check the reassignment error code. */
+               status = NETIF_RSP_OKAY;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               if(gop->status != 0) { 
+                       DPRINTK("Bad status %d from grant transfer to DOM%u\n",
+                               gop->status, netif->domid);
+                       /* XXX SMH: should free 'old_mfn' here */
+                       status = NETIF_RSP_ERROR; 
+               } 
+#else
+               if (unlikely(mcl[1].result != 0)) {
+                       DPRINTK("Failed MMU update transferring to DOM%u\n",
+                               netif->domid);
+                       free_mfn(old_mfn);
+                       status = NETIF_RSP_ERROR;
+               }
+#endif
+               evtchn = netif->evtchn;
+               id = netif->rx->ring[
+                       MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
+               if (make_rx_response(netif, id, status,
+                                    (old_mfn << PAGE_SHIFT) | /* XXX */
+                                    ((unsigned long)skb->data & ~PAGE_MASK),
+                                    size, skb->proto_csum_valid) &&
+                   (rx_notify[evtchn] == 0)) {
+                       rx_notify[evtchn] = 1;
+                       notify_list[notify_nr++] = evtchn;
+               }
+
+               netif_put(netif);
+               dev_kfree_skb(skb);
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               mcl++;
+               gop++;
+#else
+               mcl += 2;
+               mmuext += 1;
+#endif
+       }
+
+       while (notify_nr != 0) {
+               evtchn = notify_list[--notify_nr];
+               rx_notify[evtchn] = 0;
+               notify_via_evtchn(evtchn);
+       }
+
+       /* More work to do? */
+       if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
+               tasklet_schedule(&net_rx_tasklet);
 #if 0
-    else
-        xen_network_done_notify();
+       else
+               xen_network_done_notify();
 #endif
 }
 
 static void net_alarm(unsigned long unused)
 {
-    tasklet_schedule(&net_rx_tasklet);
+       tasklet_schedule(&net_rx_tasklet);
 }
 
 struct net_device_stats *netif_be_get_stats(struct net_device *dev)
 {
-    netif_t *netif = netdev_priv(dev);
-    return &netif->stats;
+       netif_t *netif = netdev_priv(dev);
+       return &netif->stats;
 }
 
 static int __on_net_schedule_list(netif_t *netif)
 {
-    return netif->list.next != NULL;
+       return netif->list.next != NULL;
 }
 
 static void remove_from_net_schedule_list(netif_t *netif)
 {
-    spin_lock_irq(&net_schedule_list_lock);
-    if ( likely(__on_net_schedule_list(netif)) )
-    {
-        list_del(&netif->list);
-        netif->list.next = NULL;
-        netif_put(netif);
-    }
-    spin_unlock_irq(&net_schedule_list_lock);
+       spin_lock_irq(&net_schedule_list_lock);
+       if (likely(__on_net_schedule_list(netif))) {
+               list_del(&netif->list);
+               netif->list.next = NULL;
+               netif_put(netif);
+       }
+       spin_unlock_irq(&net_schedule_list_lock);
 }
 
 static void add_to_net_schedule_list_tail(netif_t *netif)
 {
-    if ( __on_net_schedule_list(netif) )
-        return;
-
-    spin_lock_irq(&net_schedule_list_lock);
-    if ( !__on_net_schedule_list(netif) && netif->active )
-    {
-        list_add_tail(&netif->list, &net_schedule_list);
-        netif_get(netif);
-    }
-    spin_unlock_irq(&net_schedule_list_lock);
+       if (__on_net_schedule_list(netif))
+               return;
+
+       spin_lock_irq(&net_schedule_list_lock);
+       if (!__on_net_schedule_list(netif) && netif->active) {
+               list_add_tail(&netif->list, &net_schedule_list);
+               netif_get(netif);
+       }
+       spin_unlock_irq(&net_schedule_list_lock);
 }
 
 void netif_schedule_work(netif_t *netif)
 {
-    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
-         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
-    {
-        add_to_net_schedule_list_tail(netif);
-        maybe_schedule_tx_action();
-    }
+       if ((netif->tx_req_cons != netif->tx->req_prod) &&
+           ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE)) {
+               add_to_net_schedule_list_tail(netif);
+               maybe_schedule_tx_action();
+       }
 }
 
 void netif_deschedule_work(netif_t *netif)
 {
-    remove_from_net_schedule_list(netif);
+       remove_from_net_schedule_list(netif);
 }
 
 
 static void tx_credit_callback(unsigned long data)
 {
-    netif_t *netif = (netif_t *)data;
-    netif->remaining_credit = netif->credit_bytes;
-    netif_schedule_work(netif);
+       netif_t *netif = (netif_t *)data;
+       netif->remaining_credit = netif->credit_bytes;
+       netif_schedule_work(netif);
 }
 
 inline static void net_tx_action_dealloc(void)
 {
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    gnttab_unmap_grant_ref_t *gop;
-#else
-    multicall_entry_t *mcl;
-#endif
-    u16 pending_idx;
-    PEND_RING_IDX dc, dp;
-    netif_t *netif;
-
-    dc = dealloc_cons;
-    dp = dealloc_prod;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    /*
-     * Free up any grants we have finished using
-     */
-    gop = tx_unmap_ops;
-    while ( dc != dp )
-    {
-        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-        gop->host_addr    = MMAP_VADDR(pending_idx);
-        gop->dev_bus_addr = 0;
-        gop->handle       = grant_tx_ref[pending_idx];
-        grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
-        gop++;
-    }
-    BUG_ON(HYPERVISOR_grant_table_op(
-               GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
-#else
-    mcl = tx_mcl;
-    while ( dc != dp )
-    {
-        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-       MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
-                               __pte(0), 0);
-        mcl++;     
-    }
-
-    mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
-        BUG();
-
-    mcl = tx_mcl;
-#endif
-    while ( dealloc_cons != dp )
-    {
+       gnttab_unmap_grant_ref_t *gop;
+#else
+       multicall_entry_t *mcl;
+#endif
+       u16 pending_idx;
+       PEND_RING_IDX dc, dp;
+       netif_t *netif;
+
+       dc = dealloc_cons;
+       dp = dealloc_prod;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       /*
+        * Free up any grants we have finished using
+        */
+       gop = tx_unmap_ops;
+       while (dc != dp) {
+               pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
+               gop->host_addr    = MMAP_VADDR(pending_idx);
+               gop->dev_bus_addr = 0;
+               gop->handle       = grant_tx_ref[pending_idx];
+               grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
+               gop++;
+       }
+       BUG_ON(HYPERVISOR_grant_table_op(
+               GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
+#else
+       mcl = tx_mcl;
+       while (dc != dp) {
+               pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
+               MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
+                                       __pte(0), 0);
+               mcl++;     
+       }
+
+       mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+       BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
+
+       mcl = tx_mcl;
+#endif
+       while (dealloc_cons != dp) {
 #ifndef CONFIG_XEN_NETDEV_GRANT
-        /* The update_va_mapping() must not fail. */
-        BUG_ON(mcl[0].result != 0);
-#endif
-
-        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
-
-        netif = pending_tx_info[pending_idx].netif;
-
-        make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
-                         NETIF_RSP_OKAY);
+               /* The update_va_mapping() must not fail. */
+               BUG_ON(mcl[0].result != 0);
+#endif
+
+               pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
+
+               netif = pending_tx_info[pending_idx].netif;
+
+               make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
+                                NETIF_RSP_OKAY);
         
-        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-
-        /*
-         * Scheduling checks must happen after the above response is posted.
-         * This avoids a possible race with a guest OS on another CPU if that
-         * guest is testing against 'resp_prod' when deciding whether to notify
-         * us when it queues additional packets.
-         */
-        mb();
-        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
-             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
-            add_to_net_schedule_list_tail(netif);
+               pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+
+               /*
+                * Scheduling checks must happen after the above response is
+                * posted. This avoids a possible race with a guest OS on
+                * another CPU if that guest is testing against 'resp_prod'
+                * when deciding whether to notify us when it queues additional
+                 * packets.
+                */
+               mb();
+               if ((netif->tx_req_cons != netif->tx->req_prod) &&
+                   ((netif->tx_req_cons-netif->tx_resp_prod) !=
+                    NETIF_TX_RING_SIZE))
+                       add_to_net_schedule_list_tail(netif);
         
-        netif_put(netif);
+               netif_put(netif);
 
 #ifndef CONFIG_XEN_NETDEV_GRANT
-        mcl++;
-#endif
-    }
-
+               mcl++;
+#endif
+       }
 }
 
 /* Called after netfront has transmitted */
 static void net_tx_action(unsigned long unused)
 {
-    struct list_head *ent;
-    struct sk_buff *skb;
-    netif_t *netif;
-    netif_tx_request_t txreq;
-    u16 pending_idx;
-    NETIF_RING_IDX i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    gnttab_map_grant_ref_t *mop;
-#else
-    multicall_entry_t *mcl;
-#endif
-    unsigned int data_len;
-
-    if ( dealloc_cons != dealloc_prod )
-        net_tx_action_dealloc();
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    mop = tx_map_ops;
-#else
-    mcl = tx_mcl;
-#endif
-    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
-            !list_empty(&net_schedule_list) )
-    {
-        /* Get a netif from the list with work to do. */
-        ent = net_schedule_list.next;
-        netif = list_entry(ent, netif_t, list);
-        netif_get(netif);
-        remove_from_net_schedule_list(netif);
-
-        /* Work to do? */
-        i = netif->tx_req_cons;
-        if ( (i == netif->tx->req_prod) ||
-             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
-        {
-            netif_put(netif);
-            continue;
-        }
-
-        rmb(); /* Ensure that we see the request before we copy it. */
-        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
-               sizeof(txreq));
-        /* Credit-based scheduling. */
-        if ( txreq.size > netif->remaining_credit )
-        {
-            unsigned long now = jiffies;
-            unsigned long next_credit = 
-                netif->credit_timeout.expires +
-                msecs_to_jiffies(netif->credit_usec / 1000);
-
-            /* Timer could already be pending in some rare cases. */
-            if ( timer_pending(&netif->credit_timeout) )
-                break;
-
-            /* Already passed the point at which we can replenish credit? */
-            if ( time_after_eq(now, next_credit) )
-            {
-                netif->credit_timeout.expires = now;
-                netif->remaining_credit = netif->credit_bytes;
-            }
-
-            /* Still too big to send right now? Then set a timer callback. */
-            if ( txreq.size > netif->remaining_credit )
-            {
-                netif->remaining_credit = 0;
-                netif->credit_timeout.expires  = next_credit;
-                netif->credit_timeout.data     = (unsigned long)netif;
-                netif->credit_timeout.function = tx_credit_callback;
-                add_timer_on(&netif->credit_timeout, smp_processor_id());
-                break;
-            }
-        }
-        netif->remaining_credit -= txreq.size;
-
-        /*
-         * Why the barrier? It ensures that the frontend sees updated req_cons
-         * before we check for more work to schedule.
-         */
-        netif->tx->req_cons = ++netif->tx_req_cons;
-        mb();
-
-        netif_schedule_work(netif);
-
-        if ( unlikely(txreq.size < ETH_HLEN) || 
-             unlikely(txreq.size > ETH_FRAME_LEN) )
-        {
-            DPRINTK("Bad packet size: %d\n", txreq.size);
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            continue; 
-        }
-
-        /* No crossing a page boundary as the payload mustn't fragment. */
-        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) ) 
-        {
-            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
-                    txreq.addr, txreq.size, 
-                    (txreq.addr &~PAGE_MASK) + txreq.size);
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            continue;
-        }
-
-        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-
-        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
-
-        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
-        {
-            DPRINTK("Can't allocate a skb in start_xmit.\n");
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            break;
-        }
-
-        /* Packets passed to netif_rx() must have some headroom. */
-        skb_reserve(skb, 16);
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        mop->host_addr = MMAP_VADDR(pending_idx);
-        mop->dom       = netif->domid;
-        mop->ref       = txreq.addr >> PAGE_SHIFT;
-        mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
-        mop++;
-#else
-       MULTI_update_va_mapping_otherdomain(
-           mcl, MMAP_VADDR(pending_idx),
-           pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
-           0, netif->domid);
-
-        mcl++;
-#endif
-
-        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
-        pending_tx_info[pending_idx].netif = netif;
-        *((u16 *)skb->data) = pending_idx;
-
-        __skb_queue_tail(&tx_queue, skb);
-
-        pending_cons++;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        if ( (mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops) )
-            break;
-#else
-        /* Filled the batch queue? */
-        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
-            break;
-#endif
-    }
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    if ( mop == tx_map_ops )
-        return;
-
-    BUG_ON(HYPERVISOR_grant_table_op(
-        GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
-
-    mop = tx_map_ops;
-#else
-    if ( mcl == tx_mcl )
-        return;
-
-    BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
-
-    mcl = tx_mcl;
-#endif
-    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
-    {
-        pending_idx = *((u16 *)skb->data);
-        netif       = pending_tx_info[pending_idx].netif;
-        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
-
-        /* Check the remap error code. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        if ( unlikely(mop->handle < 0) )
-        {
-            printk(KERN_ALERT "#### netback grant fails\n");
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            kfree_skb(skb);
-            mop++;
-            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-            continue;
-        }
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
-                             FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
-        grant_tx_ref[pending_idx] = mop->handle;
-#else
-        if ( unlikely(mcl[0].result != 0) )
-        {
-            DPRINTK("Bad page frame\n");
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            kfree_skb(skb);
-            mcl++;
-            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-            continue;
-        }
-
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
-            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
-#endif
-
-        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
-
-        __skb_put(skb, data_len);
-        memcpy(skb->data, 
-               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
-               data_len);
-        if ( data_len < txreq.size )
-        {
-            /* Append the packet payload as a fragment. */
-            skb_shinfo(skb)->frags[0].page        = 
-                virt_to_page(MMAP_VADDR(pending_idx));
-            skb_shinfo(skb)->frags[0].size        = txreq.size - data_len;
-            skb_shinfo(skb)->frags[0].page_offset = 
-                (txreq.addr + data_len) & ~PAGE_MASK;
-            skb_shinfo(skb)->nr_frags = 1;
-        }
-        else
-        {
-            /* Schedule a response immediately. */
-            netif_idx_release(pending_idx);
-        }
-
-        skb->data_len  = txreq.size - data_len;
-        skb->len      += skb->data_len;
-
-        skb->dev      = netif->dev;
-        skb->protocol = eth_type_trans(skb, skb->dev);
-
-        /* No checking needed on localhost, but remember the field is blank. */
-        skb->ip_summed        = CHECKSUM_UNNECESSARY;
-        skb->proto_csum_valid = 1;
-        skb->proto_csum_blank = txreq.csum_blank;
-
-        netif->stats.rx_bytes += txreq.size;
-        netif->stats.rx_packets++;
-
-        netif_rx(skb);
-        netif->dev->last_rx = jiffies;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        mop++;
-#else
-        mcl++;
-#endif
-    }
+       struct list_head *ent;
+       struct sk_buff *skb;
+       netif_t *netif;
+       netif_tx_request_t txreq;
+       u16 pending_idx;
+       NETIF_RING_IDX i;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       gnttab_map_grant_ref_t *mop;
+#else
+       multicall_entry_t *mcl;
+#endif
+       unsigned int data_len;
+
+       if (dealloc_cons != dealloc_prod)
+               net_tx_action_dealloc();
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       mop = tx_map_ops;
+#else
+       mcl = tx_mcl;
+#endif
+       while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
+               !list_empty(&net_schedule_list)) {
+               /* Get a netif from the list with work to do. */
+               ent = net_schedule_list.next;
+               netif = list_entry(ent, netif_t, list);
+               netif_get(netif);
+               remove_from_net_schedule_list(netif);
+
+               /* Work to do? */
+               i = netif->tx_req_cons;
+               if ((i == netif->tx->req_prod) ||
+                   ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE)) {
+                       netif_put(netif);
+                       continue;
+               }
+
+               rmb(); /* Ensure that we see the request before we copy it. */
+               memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
+                      sizeof(txreq));
+               /* Credit-based scheduling. */
+               if (txreq.size > netif->remaining_credit) {
+                       unsigned long now = jiffies;
+                       unsigned long next_credit = 
+                               netif->credit_timeout.expires +
+                               msecs_to_jiffies(netif->credit_usec / 1000);
+
+                       /* Timer could already be pending in rare cases. */
+                       if (timer_pending(&netif->credit_timeout))
+                               break;
+
+                       /* Passed the point where we can replenish credit? */
+                       if (time_after_eq(now, next_credit)) {
+                               netif->credit_timeout.expires = now;
+                               netif->remaining_credit = netif->credit_bytes;
+                       }
+
+                       /* Still too big to send right now? Set a callback. */
+                       if (txreq.size > netif->remaining_credit) {
+                               netif->remaining_credit = 0;
+                               netif->credit_timeout.expires  = 
+                                       next_credit;
+                               netif->credit_timeout.data     =
+                                       (unsigned long)netif;
+                               netif->credit_timeout.function =
+                                       tx_credit_callback;
+                               add_timer_on(&netif->credit_timeout,
+                                            smp_processor_id());
+                               break;
+                       }
+               }
+               netif->remaining_credit -= txreq.size;
+
+               /*
+                * Why the barrier? It ensures that the frontend sees updated
+                * req_cons before we check for more work to schedule.
+                */
+               netif->tx->req_cons = ++netif->tx_req_cons;
+               mb();
+
+               netif_schedule_work(netif);
+
+               if (unlikely(txreq.size < ETH_HLEN) || 
+                   unlikely(txreq.size > ETH_FRAME_LEN)) {
+                       DPRINTK("Bad packet size: %d\n", txreq.size);
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       continue; 
+               }
+
+               /* No crossing a page as the payload mustn't fragment. */
+               if (unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >=
+                            PAGE_SIZE)) {
+                       DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
+                               txreq.addr, txreq.size, 
+                               (txreq.addr &~PAGE_MASK) + txreq.size);
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       continue;
+               }
+
+               pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+               data_len = (txreq.size > PKT_PROT_LEN) ?
+                       PKT_PROT_LEN : txreq.size;
+
+               skb = alloc_skb(data_len+16, GFP_ATOMIC);
+               if (unlikely(skb == NULL)) {
+                       DPRINTK("Can't allocate a skb in start_xmit.\n");
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       break;
+               }
+
+               /* Packets passed to netif_rx() must have some headroom. */
+               skb_reserve(skb, 16);
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               mop->host_addr = MMAP_VADDR(pending_idx);
+               mop->dom       = netif->domid;
+               mop->ref       = txreq.addr >> PAGE_SHIFT;
+               mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
+               mop++;
+#else
+               MULTI_update_va_mapping_otherdomain(
+                       mcl, MMAP_VADDR(pending_idx),
+                       pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
+                       0, netif->domid);
+
+               mcl++;
+#endif
+
+               memcpy(&pending_tx_info[pending_idx].req,
+                      &txreq, sizeof(txreq));
+               pending_tx_info[pending_idx].netif = netif;
+               *((u16 *)skb->data) = pending_idx;
+
+               __skb_queue_tail(&tx_queue, skb);
+
+               pending_cons++;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
+                       break;
+#else
+               /* Filled the batch queue? */
+               if ((mcl - tx_mcl) == ARRAY_SIZE(tx_mcl))
+                       break;
+#endif
+       }
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       if (mop == tx_map_ops)
+               return;
+
+       BUG_ON(HYPERVISOR_grant_table_op(
+               GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
+
+       mop = tx_map_ops;
+#else
+       if (mcl == tx_mcl)
+               return;
+
+       BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
+
+       mcl = tx_mcl;
+#endif
+       while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
+               pending_idx = *((u16 *)skb->data);
+               netif       = pending_tx_info[pending_idx].netif;
+               memcpy(&txreq, &pending_tx_info[pending_idx].req,
+                      sizeof(txreq));
+
+               /* Check the remap error code. */
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               if (unlikely(mop->handle < 0)) {
+                       printk(KERN_ALERT "#### netback grant fails\n");
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       kfree_skb(skb);
+                       mop++;
+                       pending_ring[MASK_PEND_IDX(pending_prod++)] =
+                               pending_idx;
+                       continue;
+               }
+               phys_to_machine_mapping[
+                       __pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+                       FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
+               grant_tx_ref[pending_idx] = mop->handle;
+#else
+               if (unlikely(mcl[0].result != 0)) {
+                       DPRINTK("Bad page frame\n");
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       kfree_skb(skb);
+                       mcl++;
+                       pending_ring[MASK_PEND_IDX(pending_prod++)] =
+                               pending_idx;
+                       continue;
+               }
+
+               phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >>
+                                      PAGE_SHIFT] =
+                       FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
+#endif
+
+               data_len = (txreq.size > PKT_PROT_LEN) ?
+                       PKT_PROT_LEN : txreq.size;
+
+               __skb_put(skb, data_len);
+               memcpy(skb->data, 
+                      (void *)(MMAP_VADDR(pending_idx)|
+                               (txreq.addr&~PAGE_MASK)),
+                      data_len);
+               if (data_len < txreq.size) {
+                       /* Append the packet payload as a fragment. */
+                       skb_shinfo(skb)->frags[0].page        = 
+                               virt_to_page(MMAP_VADDR(pending_idx));
+                       skb_shinfo(skb)->frags[0].size        =
+                               txreq.size - data_len;
+                       skb_shinfo(skb)->frags[0].page_offset = 
+                               (txreq.addr + data_len) & ~PAGE_MASK;
+                       skb_shinfo(skb)->nr_frags = 1;
+               } else {
+                       /* Schedule a response immediately. */
+                       netif_idx_release(pending_idx);
+               }
+
+               skb->data_len  = txreq.size - data_len;
+               skb->len      += skb->data_len;
+
+               skb->dev      = netif->dev;
+               skb->protocol = eth_type_trans(skb, skb->dev);
+
+               /*
+                * No checking needed on localhost, but remember the field is
+                * blank.
+                 */
+               skb->ip_summed        = CHECKSUM_UNNECESSARY;
+               skb->proto_csum_valid = 1;
+               skb->proto_csum_blank = txreq.csum_blank;
+
+               netif->stats.rx_bytes += txreq.size;
+               netif->stats.rx_packets++;
+
+               netif_rx(skb);
+               netif->dev->last_rx = jiffies;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               mop++;
+#else
+               mcl++;
+#endif
+       }
 }
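
The credit-based scheduling block in net_tx_action() is a token bucket:
each interface earns credit_bytes of transmit allowance per accounting
period, oversized requests are deferred, and the credit is replenished
once the period has elapsed. A freestanding sketch of the same policy
(hypothetical names and a fake clock; the real code uses jiffies and a
timer callback via add_timer_on()):

    #include <stdio.h>

    struct bucket {
        unsigned long credit_bytes;  /* allowance granted per period */
        unsigned long remaining;     /* credit not yet spent */
        unsigned long next_refill;   /* fake-clock time of next top-up */
        unsigned long period;        /* refill interval */
    };

    /* Returns 1 if a packet of 'size' bytes may be sent at time 'now',
     * 0 if it must be deferred until the next refill. */
    static int bucket_send(struct bucket *b, unsigned long size,
                           unsigned long now)
    {
        if (now >= b->next_refill) {         /* replenish the credit */
            b->remaining   = b->credit_bytes;
            b->next_refill = now + b->period;
        }
        if (size > b->remaining)
            return 0;                        /* over budget: defer */
        b->remaining -= size;
        return 1;
    }

    int main(void)
    {
        struct bucket b = { 3000, 3000, 10, 10 };
        unsigned long t;

        /* Two 1500-byte frames fit per period; the rest are deferred. */
        for (t = 0; t < 30; t++)
            printf("t=%2lu sent=%d\n", t, bucket_send(&b, 1500, t));
        return 0;
    }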
 
 static void netif_idx_release(u16 pending_idx)
 {
-    static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
-    unsigned long flags;
-
-    spin_lock_irqsave(&_lock, flags);
-    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
-    spin_unlock_irqrestore(&_lock, flags);
-
-    tasklet_schedule(&net_tx_tasklet);
+       static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
+       unsigned long flags;
+
+       spin_lock_irqsave(&_lock, flags);
+       dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
+       spin_unlock_irqrestore(&_lock, flags);
+
+       tasklet_schedule(&net_tx_tasklet);
 }
 
 static void netif_page_release(struct page *page)
 {
-    u16 pending_idx = page - virt_to_page(mmap_vstart);
-
-    /* Ready for next use. */
-    set_page_count(page, 1);
-
-    netif_idx_release(pending_idx);
+       u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+       /* Ready for next use. */
+       set_page_count(page, 1);
+
+       netif_idx_release(pending_idx);
 }
 
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 {
-    netif_t *netif = dev_id;
-    if ( tx_work_exists(netif) )
-    {
-        add_to_net_schedule_list_tail(netif);
-        maybe_schedule_tx_action();
-    }
-    return IRQ_HANDLED;
+       netif_t *netif = dev_id;
+       if (tx_work_exists(netif)) {
+               add_to_net_schedule_list_tail(netif);
+               maybe_schedule_tx_action();
+       }
+       return IRQ_HANDLED;
 }
 
 static void make_tx_response(netif_t *netif, 
                              u16      id,
                              s8       st)
 {
-    NETIF_RING_IDX i = netif->tx_resp_prod;
-    netif_tx_response_t *resp;
-
-    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
-    resp->id     = id;
-    resp->status = st;
-    wmb();
-    netif->tx->resp_prod = netif->tx_resp_prod = ++i;
-
-    mb(); /* Update producer before checking event threshold. */
-    if ( i == netif->tx->event )
-        notify_via_evtchn(netif->evtchn);
+       NETIF_RING_IDX i = netif->tx_resp_prod;
+       netif_tx_response_t *resp;
+
+       resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
+       resp->id     = id;
+       resp->status = st;
+       wmb();
+       netif->tx->resp_prod = netif->tx_resp_prod = ++i;
+
+       mb(); /* Update producer before checking event threshold. */
+       if (i == netif->tx->event)
+               notify_via_evtchn(netif->evtchn);
 }
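
make_tx_response() only kicks the event channel when the new producer
index equals the value the frontend left in tx->event, so interrupts are
suppressed while the peer is still catching up on earlier responses. A
toy model of that handshake (hypothetical names; single-threaded, so the
mb()/wmb() ordering the real code needs is omitted):

    #include <stdio.h>

    static unsigned int resp_prod;     /* backend: responses produced */
    static unsigned int event;         /* frontend: "wake me at this index" */
    static unsigned int notifications; /* stand-in for notify_via_evtchn() */

    static void post_response(void)
    {
        if (++resp_prod == event)      /* cf. "if (i == netif->tx->event)" */
            notifications++;
    }

    int main(void)
    {
        unsigned int i;

        event = 4;                     /* frontend asks to be woken at 4 */
        for (i = 0; i < 8; i++)
            post_response();
        /* Eight responses, but only one notification was delivered. */
        printf("responses=%u notifications=%u\n", resp_prod, notifications);
        return 0;
    }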
 
 static int make_rx_response(netif_t *netif, 
@@ -882,110 +878,120 @@
                             u16      size,
                             u16      csum_valid)
 {
-    NETIF_RING_IDX i = netif->rx_resp_prod;
-    netif_rx_response_t *resp;
-
-    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
-    resp->addr       = addr;
-    resp->csum_valid = csum_valid;
-    resp->id         = id;
-    resp->status     = (s16)size;
-    if ( st < 0 )
-        resp->status = (s16)st;
-    wmb();
-    netif->rx->resp_prod = netif->rx_resp_prod = ++i;
-
-    mb(); /* Update producer before checking event threshold. */
-    return (i == netif->rx->event);
+       NETIF_RING_IDX i = netif->rx_resp_prod;
+       netif_rx_response_t *resp;
+
+       resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+       resp->addr       = addr;
+       resp->csum_valid = csum_valid;
+       resp->id         = id;
+       resp->status     = (s16)size;
+       if (st < 0)
+               resp->status = (s16)st;
+       wmb();
+       netif->rx->resp_prod = netif->rx_resp_prod = ++i;
+
+       mb(); /* Update producer before checking event threshold. */
+       return (i == netif->rx->event);
 }
 
 static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
 {
-    struct list_head *ent;
-    netif_t *netif;
-    int i = 0;
-
-    printk(KERN_ALERT "netif_schedule_list:\n");
-    spin_lock_irq(&net_schedule_list_lock);
-
-    list_for_each ( ent, &net_schedule_list )
-    {
-        netif = list_entry(ent, netif_t, list);
-        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
-               i, netif->rx_req_cons, netif->rx_resp_prod);               
-        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
-               netif->tx_req_cons, netif->tx_resp_prod);
-        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
-               netif->rx->req_prod, netif->rx->resp_prod);
-        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
-               netif->rx->event, netif->tx->req_prod);
-        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
-               netif->tx->resp_prod, netif->tx->event);
-        i++;
-    }
-
-    spin_unlock_irq(&net_schedule_list_lock);
-    printk(KERN_ALERT " ** End of netif_schedule_list **\n");
-
-    return IRQ_HANDLED;
+       struct list_head *ent;
+       netif_t *netif;
+       int i = 0;
+
+       printk(KERN_ALERT "netif_schedule_list:\n");
+       spin_lock_irq(&net_schedule_list_lock);
+
+       list_for_each (ent, &net_schedule_list) {
+               netif = list_entry(ent, netif_t, list);
+               printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
+                      "rx_resp_prod=%08x\n",
+                      i, netif->rx_req_cons, netif->rx_resp_prod);
+               printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
+                      netif->tx_req_cons, netif->tx_resp_prod);
+               printk(KERN_ALERT "   shared(rx_req_prod=%08x "
+                      "rx_resp_prod=%08x\n",
+                      netif->rx->req_prod, netif->rx->resp_prod);
+               printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
+                      netif->rx->event, netif->tx->req_prod);
+               printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
+                      netif->tx->resp_prod, netif->tx->event);
+               i++;
+       }
+
+       spin_unlock_irq(&net_schedule_list_lock);
+       printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+
+       return IRQ_HANDLED;
 }
 
 static int __init netback_init(void)
 {
-    int i;
-    struct page *page;
-
-    if ( !(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
-         !(xen_start_info->flags & SIF_INITDOMAIN) )
-        return 0;
-
-    IPRINTK("Initialising Xen netif backend.\n");
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    IPRINTK("Using grant tables.\n");
-#endif
-
-    /* We can increase reservation by this much in net_rx_action(). */
-    balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
-
-    skb_queue_head_init(&rx_queue);
-    skb_queue_head_init(&tx_queue);
-
-    init_timer(&net_timer);
-    net_timer.data = 0;
-    net_timer.function = net_alarm;
+       int i;
+       struct page *page;
+
+       if (!(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
+           !(xen_start_info->flags & SIF_INITDOMAIN))
+               return 0;
+
+       IPRINTK("Initialising Xen netif backend.\n");
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       IPRINTK("Using grant tables.\n");
+#endif
+
+       /* We can increase reservation by this much in net_rx_action(). */
+       balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
+
+       skb_queue_head_init(&rx_queue);
+       skb_queue_head_init(&tx_queue);
+
+       init_timer(&net_timer);
+       net_timer.data = 0;
+       net_timer.function = net_alarm;
     
-    page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
-    BUG_ON(page == NULL);
-    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-    {
-        page = virt_to_page(MMAP_VADDR(i));
-        set_page_count(page, 1);
-        SetPageForeign(page, netif_page_release);
-    }
-
-    pending_cons = 0;
-    pending_prod = MAX_PENDING_REQS;
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-        pending_ring[i] = i;
-
-    spin_lock_init(&net_schedule_list_lock);
-    INIT_LIST_HEAD(&net_schedule_list);
-
-    netif_xenbus_init();
-
-    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
-                      netif_be_dbg, SA_SHIRQ, 
-                      "net-be-dbg", &netif_be_dbg);
-
-    return 0;
+       page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
+       BUG_ON(page == NULL);
+       mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+
+       for (i = 0; i < MAX_PENDING_REQS; i++) {
+               page = virt_to_page(MMAP_VADDR(i));
+               set_page_count(page, 1);
+               SetPageForeign(page, netif_page_release);
+       }
+
+       pending_cons = 0;
+       pending_prod = MAX_PENDING_REQS;
+       for (i = 0; i < MAX_PENDING_REQS; i++)
+               pending_ring[i] = i;
+
+       spin_lock_init(&net_schedule_list_lock);
+       INIT_LIST_HEAD(&net_schedule_list);
+
+       netif_xenbus_init();
+
+       (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
+                         netif_be_dbg, SA_SHIRQ, 
+                         "net-be-dbg", &netif_be_dbg);
+
+       return 0;
 }
 
 static void netback_cleanup(void)
 {
-    BUG();
+       BUG();
 }
 
 module_init(netback_init);
 module_exit(netback_cleanup);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Fri Sep 16 18:07:50 2005
@@ -294,3 +294,13 @@
 {
        xenbus_register_backend(&netback);
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Fri Sep 16 18:07:50 2005
@@ -54,43 +54,10 @@
 #include <asm-xen/balloon.h>
 #include <asm/page.h>
 #include <asm/uaccess.h>
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
 #include <asm-xen/xen-public/grant_table.h>
 #include <asm-xen/gnttab.h>
 
-static grant_ref_t gref_tx_head;
-static grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 
-
-static grant_ref_t gref_rx_head;
-static grant_ref_t grant_rx_ref[NETIF_RX_RING_SIZE + 1];
-
 #define GRANT_INVALID_REF      (0xFFFF)
-
-#ifdef GRANT_DEBUG
-static void
-dump_packet(int tag, void *addr, u32 ap)
-{
-    unsigned char *p = (unsigned char *)ap;
-    int i;
-    
-    printk(KERN_ALERT "#### rx_poll   %c %08x ", tag & 0xff, addr);
-    for (i = 0; i < 20; i++) {
-        printk("%02x", p[i]);
-    }
-    printk("\n");
-}
-
-#define GDPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
-                           __FILE__ , __LINE__ , ## _a )
-#else 
-#define dump_packet(x,y,z)  ((void)0)  
-#define GDPRINTK(_f, _a...) ((void)0)
-#endif
-
-#endif
-
-
 
 #ifndef __GFP_NOWARN
 #define __GFP_NOWARN 0
@@ -124,7 +91,6 @@
 #define NETIF_STATE_DISCONNECTED 0
 #define NETIF_STATE_CONNECTED    1
 
-
 static unsigned int netif_state = NETIF_STATE_DISCONNECTED;
 
 static void network_tx_buf_gc(struct net_device *dev);
@@ -147,45 +113,50 @@
 #define netfront_info net_private
 struct net_private
 {
-    struct list_head list;
-    struct net_device *netdev;
-
-    struct net_device_stats stats;
-    NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
-    unsigned int tx_full;
+       struct list_head list;
+       struct net_device *netdev;
+
+       struct net_device_stats stats;
+       NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
+       unsigned int tx_full;
     
-    netif_tx_interface_t *tx;
-    netif_rx_interface_t *rx;
-
-    spinlock_t   tx_lock;
-    spinlock_t   rx_lock;
-
-    unsigned int handle;
-    unsigned int evtchn;
-
-    /* What is the status of our connection to the remote backend? */
+       netif_tx_interface_t *tx;
+       netif_rx_interface_t *rx;
+
+       spinlock_t   tx_lock;
+       spinlock_t   rx_lock;
+
+       unsigned int handle;
+       unsigned int evtchn;
+
+       /* What is the status of our connection to the remote backend? */
 #define BEST_CLOSED       0
 #define BEST_DISCONNECTED 1
 #define BEST_CONNECTED    2
-    unsigned int backend_state;
-
-    /* Is this interface open or closed (down or up)? */
+       unsigned int backend_state;
+
+       /* Is this interface open or closed (down or up)? */
 #define UST_CLOSED        0
 #define UST_OPEN          1
-    unsigned int user_state;
-
-    /* Receive-ring batched refills. */
+       unsigned int user_state;
+
+       /* Receive-ring batched refills. */
 #define RX_MIN_TARGET 8
 #define RX_MAX_TARGET NETIF_RX_RING_SIZE
-    int rx_min_target, rx_max_target, rx_target;
-    struct sk_buff_head rx_batch;
-
-    /*
-     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
-     * array is an index into a chain of free entries.
-     */
-    struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
-    struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
+       int rx_min_target, rx_max_target, rx_target;
+       struct sk_buff_head rx_batch;
+
+       /*
+        * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
+        * array is an index into a chain of free entries.
+        */
+       struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
+       struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
+
+       grant_ref_t gref_tx_head;
+       grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 
+       grant_ref_t gref_rx_head;
+       grant_ref_t grant_rx_ref[NETIF_TX_RING_SIZE + 1]; 
 
        struct xenbus_device *xbdev;
        char *backend;
@@ -197,32 +168,32 @@
 };
 
 /* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
-#define ADD_ID_TO_FREELIST(_list, _id)             \
-    (_list)[(_id)] = (_list)[0];                   \
-    (_list)[0]     = (void *)(unsigned long)(_id);
-#define GET_ID_FROM_FREELIST(_list)                \
- ({ unsigned long _id = (unsigned long)(_list)[0]; \
-    (_list)[0]  = (_list)[_id];                    \
-    (unsigned short)_id; })
+#define ADD_ID_TO_FREELIST(_list, _id)                 \
+       (_list)[(_id)] = (_list)[0];                    \
+       (_list)[0]     = (void *)(unsigned long)(_id);
+#define GET_ID_FROM_FREELIST(_list)                            \
+       ({ unsigned long _id = (unsigned long)(_list)[0];       \
+          (_list)[0]  = (_list)[_id];                          \
+          (unsigned short)_id; })
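
ADD_ID_TO_FREELIST/GET_ID_FROM_FREELIST thread the free ids through the
skb arrays themselves: slot 0 holds the current head id (stored as a
fake pointer), and every free slot holds the id of the next free slot,
so no separate stack is needed. A standalone illustration of the same
scheme (wrapper functions with hypothetical names around the macro
bodies above):

    #include <stdio.h>

    #define N 4
    static void *list[N + 1];    /* slot 0 is the freelist head */

    static void add_id(int id)   /* mirrors ADD_ID_TO_FREELIST */
    {
        list[id] = list[0];
        list[0]  = (void *)(unsigned long)id;
    }

    static int get_id(void)      /* mirrors GET_ID_FROM_FREELIST */
    {
        unsigned long id = (unsigned long)list[0];
        list[0] = list[id];
        return (int)id;
    }

    int main(void)
    {
        int i;

        for (i = 1; i <= N; i++)               /* ids 1..N start free */
            add_id(i);
        printf("%d %d\n", get_id(), get_id()); /* prints "4 3" (LIFO) */
        add_id(4);
        printf("%d\n", get_id());              /* prints "4" again */
        return 0;
    }

Ids come back LIFO, which tends to keep recently touched (cache-warm)
slots in circulation.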
 
 #ifdef DEBUG
 static char *be_state_name[] = {
-    [BEST_CLOSED]       = "closed",
-    [BEST_DISCONNECTED] = "disconnected",
-    [BEST_CONNECTED]    = "connected",
+       [BEST_CLOSED]       = "closed",
+       [BEST_DISCONNECTED] = "disconnected",
+       [BEST_CONNECTED]    = "connected",
 };
 #endif
 
 #ifdef DEBUG
 #define DPRINTK(fmt, args...) \
-    printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
+       printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
 #else
 #define DPRINTK(fmt, args...) ((void)0)
 #endif
 #define IPRINTK(fmt, args...) \
-    printk(KERN_INFO "xen_net: " fmt, ##args)
+       printk(KERN_INFO "xen_net: " fmt, ##args)
 #define WPRINTK(fmt, args...) \
-    printk(KERN_WARNING "xen_net: " fmt, ##args)
+       printk(KERN_WARNING "xen_net: " fmt, ##args)
 
 /** Send a packet on a net device to encourage switches to learn the
  * MAC. We send a fake ARP request.
@@ -232,625 +203,627 @@
  */
 static int send_fake_arp(struct net_device *dev)
 {
-    struct sk_buff *skb;
-    u32             src_ip, dst_ip;
-
-    dst_ip = INADDR_BROADCAST;
-    src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
-
-    /* No IP? Then nothing to do. */
-    if (src_ip == 0)
-        return 0;
-
-    skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
-                     dst_ip, dev, src_ip,
-                     /*dst_hw*/ NULL, /*src_hw*/ NULL, 
-                     /*target_hw*/ dev->dev_addr);
-    if (skb == NULL)
-        return -ENOMEM;
-
-    return dev_queue_xmit(skb);
+       struct sk_buff *skb;
+       u32             src_ip, dst_ip;
+
+       dst_ip = INADDR_BROADCAST;
+       src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
+
+       /* No IP? Then nothing to do. */
+       if (src_ip == 0)
+               return 0;
+
+       skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+                        dst_ip, dev, src_ip,
+                        /*dst_hw*/ NULL, /*src_hw*/ NULL, 
+                        /*target_hw*/ dev->dev_addr);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       return dev_queue_xmit(skb);
 }
 
 static int network_open(struct net_device *dev)
 {
-    struct net_private *np = netdev_priv(dev);
-
-    memset(&np->stats, 0, sizeof(np->stats));
-
-    np->user_state = UST_OPEN;
-
-    network_alloc_rx_buffers(dev);
-    np->rx->event = np->rx_resp_cons + 1;
-
-    netif_start_queue(dev);
-
-    return 0;
+       struct net_private *np = netdev_priv(dev);
+
+       memset(&np->stats, 0, sizeof(np->stats));
+
+       np->user_state = UST_OPEN;
+
+       network_alloc_rx_buffers(dev);
+       np->rx->event = np->rx_resp_cons + 1;
+
+       netif_start_queue(dev);
+
+       return 0;
 }
 
 static void network_tx_buf_gc(struct net_device *dev)
 {
-    NETIF_RING_IDX i, prod;
-    unsigned short id;
-    struct net_private *np = netdev_priv(dev);
-    struct sk_buff *skb;
-
-    if (np->backend_state != BEST_CONNECTED)
-        return;
-
-    do {
-        prod = np->tx->resp_prod;
-        rmb(); /* Ensure we see responses up to 'rp'. */
-
-        for (i = np->tx_resp_cons; i != prod; i++) {
-            id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
-            skb = np->tx_skbs[id];
-#ifdef CONFIG_XEN_NETDEV_GRANT
-            if (unlikely(gnttab_query_foreign_access(grant_tx_ref[id]) != 0)) {
-                /* other domain is still using this grant - shouldn't happen
-                   but if it does, we'll try to reclaim the grant later */
-                printk(KERN_ALERT "network_tx_buf_gc: warning -- grant "
-                       "still in use by backend domain.\n");
-                goto out; 
-            }
-            gnttab_end_foreign_access_ref(grant_tx_ref[id], GNTMAP_readonly);
-            gnttab_release_grant_reference(&gref_tx_head, grant_tx_ref[id]);
-            grant_tx_ref[id] = GRANT_INVALID_REF;
-#endif
-            ADD_ID_TO_FREELIST(np->tx_skbs, id);
-            dev_kfree_skb_irq(skb);
-        }
+       NETIF_RING_IDX i, prod;
+       unsigned short id;
+       struct net_private *np = netdev_priv(dev);
+       struct sk_buff *skb;
+
+       if (np->backend_state != BEST_CONNECTED)
+               return;
+
+       do {
+               prod = np->tx->resp_prod;
+               rmb(); /* Ensure we see responses up to 'rp'. */
+
+               for (i = np->tx_resp_cons; i != prod; i++) {
+                       id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
+                       skb = np->tx_skbs[id];
+#ifdef CONFIG_XEN_NETDEV_GRANT
+                       if (unlikely(gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)) {
+                               printk(KERN_ALERT "network_tx_buf_gc: warning "
+                                      "-- grant still in use by backend "
+                                      "domain.\n");
+                               goto out; 
+                       }
+                       gnttab_end_foreign_access_ref(
+                               np->grant_tx_ref[id], GNTMAP_readonly);
+                       gnttab_release_grant_reference(
+                               &np->gref_tx_head, np->grant_tx_ref[id]);
+                       np->grant_tx_ref[id] = GRANT_INVALID_REF;
+#endif
+                       ADD_ID_TO_FREELIST(np->tx_skbs, id);
+                       dev_kfree_skb_irq(skb);
+               }
         
-        np->tx_resp_cons = prod;
+               np->tx_resp_cons = prod;
         
-        /*
-         * Set a new event, then check for race with update of tx_cons. Note
-         * that it is essential to schedule a callback, no matter how few
-         * buffers are pending. Even if there is space in the transmit ring,
-         * higher layers may be blocked because too much data is outstanding:
-         * in such cases notification from Xen is likely to be the only kick
-         * that we'll get.
-         */
-        np->tx->event = 
-            prod + ((np->tx->req_prod - prod) >> 1) + 1;
-        mb();
-    } while (prod != np->tx->resp_prod);
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-  out: 
-#endif
-
-    if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
-        np->tx_full = 0;
-        if (np->user_state == UST_OPEN)
-            netif_wake_queue(dev);
-    }
+               /*
+                * Set a new event, then check for race with update of tx_cons.
+                * Note that it is essential to schedule a callback, no matter
+                * how few buffers are pending. Even if there is space in the
+                * transmit ring, higher layers may be blocked because too much
+                * data is outstanding: in such cases notification from Xen is
+                * likely to be the only kick that we'll get.
+                */
+               np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1;
+               mb();
+       } while (prod != np->tx->resp_prod);
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+ out: 
+#endif
+
+       if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
+               np->tx_full = 0;
+               if (np->user_state == UST_OPEN)
+                       netif_wake_queue(dev);
+       }
 }
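
Note the re-arm heuristic at the end of the garbage-collection loop:
with resp_prod == 10 and req_prod == 26 there are 16 responses
outstanding, so np->tx->event is set to 10 + (16 >> 1) + 1 = 19 and the
backend interrupts us again at the midpoint of the backlog rather than
once per packet, while the trailing "+ 1" still guarantees a wakeup even
when nothing is outstanding.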
 
 
 static void network_alloc_rx_buffers(struct net_device *dev)
 {
-    unsigned short id;
-    struct net_private *np = netdev_priv(dev);
-    struct sk_buff *skb;
-    int i, batch_target;
-    NETIF_RING_IDX req_prod = np->rx->req_prod;
-    struct xen_memory_reservation reservation;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    grant_ref_t ref;
-#endif
-
-    if (unlikely(np->backend_state != BEST_CONNECTED))
-        return;
-
-    /*
-     * Allocate skbuffs greedily, even though we batch updates to the
-     * receive ring. This creates a less bursty demand on the memory allocator,
-     * so should reduce the chance of failed allocation requests both for
-     * ourself and for other kernel subsystems.
-     */
-    batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
-    for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-        if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
-            break;
-        __skb_queue_tail(&np->rx_batch, skb);
-    }
-
-    /* Is the batch large enough to be worthwhile? */
-    if (i < (np->rx_target/2))
-        return;
-
-    for (i = 0; ; i++) {
-        if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
-            break;
-
-        skb->dev = dev;
-
-        id = GET_ID_FROM_FREELIST(np->rx_skbs);
-
-        np->rx_skbs[id] = skb;
+       unsigned short id;
+       struct net_private *np = netdev_priv(dev);
+       struct sk_buff *skb;
+       int i, batch_target;
+       NETIF_RING_IDX req_prod = np->rx->req_prod;
+       struct xen_memory_reservation reservation;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       grant_ref_t ref;
+#endif
+
+       if (unlikely(np->backend_state != BEST_CONNECTED))
+               return;
+
+       /*
+        * Allocate skbuffs greedily, even though we batch updates to the
+        * receive ring. This creates a less bursty demand on the memory
+        * allocator, so should reduce the chance of failed allocation requests
+        * both for ourselves and for other kernel subsystems.
+        */
+       batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
+       for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
+               skb = alloc_xen_skb(dev->mtu + RX_HEADROOM);
+               if (skb == NULL)
+                       break;
+               __skb_queue_tail(&np->rx_batch, skb);
+       }
+
+       /* Is the batch large enough to be worthwhile? */
+       if (i < (np->rx_target/2))
+               return;
+
+       for (i = 0; ; i++) {
+               if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
+                       break;
+
+               skb->dev = dev;
+
+               id = GET_ID_FROM_FREELIST(np->rx_skbs);
+
+               np->rx_skbs[id] = skb;
         
-        np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-       ref = gnttab_claim_grant_reference(&gref_rx_head);
-        if (unlikely((signed short)ref < 0)) {
-            printk(KERN_ALERT "#### netfront can't claim rx reference\n");
-            BUG();
-        }
-        grant_rx_ref[id] = ref;
-        gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
-        np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
-#endif
-        rx_pfn_array[i] = virt_to_mfn(skb->head);
-
-       /* Remove this page from pseudo phys map before passing back to Xen. */
-       phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 
-           = INVALID_P2M_ENTRY;
-
-       MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
-                               __pte(0), 0);
-    }
-
-    /* After all PTEs have been zapped we blow away stale TLB entries. */
-    rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-
-    /* Give away a batch of pages. */
-    rx_mcl[i].op = __HYPERVISOR_memory_op;
-    rx_mcl[i].args[0] = XENMEM_decrease_reservation;
-    rx_mcl[i].args[1] = (unsigned long)&reservation;
-
-    reservation.extent_start = rx_pfn_array;
-    reservation.nr_extents   = i;
-    reservation.extent_order = 0;
-    reservation.address_bits = 0;
-    reservation.domid        = DOMID_SELF;
-
-    /* Tell the balloon driver what is going on. */
-    balloon_update_driver_allowance(i);
-
-    /* Zap PTEs and give away pages in one big multicall. */
-    (void)HYPERVISOR_multicall(rx_mcl, i+1);
-
-    /* Check return status of HYPERVISOR_memory_op(). */
-    if (unlikely(rx_mcl[i].result != i))
-        panic("Unable to reduce memory reservation\n");
-
-    /* Above is a suitable barrier to ensure backend will see requests. */
-    np->rx->req_prod = req_prod + i;
-
-    /* Adjust our floating fill target if we risked running out of buffers. */
-    if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
-         ((np->rx_target *= 2) > np->rx_max_target))
-        np->rx_target = np->rx_max_target;
+               np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               ref = gnttab_claim_grant_reference(&np->gref_rx_head);
+               BUG_ON((signed short)ref < 0);
+               np->grant_rx_ref[id] = ref;
+               gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
+               np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
+#endif
+               rx_pfn_array[i] = virt_to_mfn(skb->head);
+
+               /* Remove this page from map before passing back to Xen. */
+               phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 
+                       = INVALID_P2M_ENTRY;
+
+               MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
+                                       __pte(0), 0);
+       }
+
+       /* After all PTEs have been zapped we blow away stale TLB entries. */
+       rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+
+       /* Give away a batch of pages. */
+       rx_mcl[i].op = __HYPERVISOR_memory_op;
+       rx_mcl[i].args[0] = XENMEM_decrease_reservation;
+       rx_mcl[i].args[1] = (unsigned long)&reservation;
+
+       reservation.extent_start = rx_pfn_array;
+       reservation.nr_extents   = i;
+       reservation.extent_order = 0;
+       reservation.address_bits = 0;
+       reservation.domid        = DOMID_SELF;
+
+       /* Tell the balloon driver what is going on. */
+       balloon_update_driver_allowance(i);
+
+       /* Zap PTEs and give away pages in one big multicall. */
+       (void)HYPERVISOR_multicall(rx_mcl, i+1);
+
+       /* Check return status of HYPERVISOR_memory_op(). */
+       if (unlikely(rx_mcl[i].result != i))
+               panic("Unable to reduce memory reservation\n");
+
+       /* Above is a suitable barrier to ensure backend will see requests. */
+       np->rx->req_prod = req_prod + i;
+
+       /* Adjust our fill target if we risked running out of buffers. */
+       if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
+           ((np->rx_target *= 2) > np->rx_max_target))
+               np->rx_target = np->rx_max_target;
 }
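
The final check in network_alloc_rx_buffers() implements a floating fill
target: whenever the ring came within a quarter of the target of running
dry, the target doubles (clamped to rx_max_target) so the next pass
batches more buffers. The original folds the doubling into the condition
via "*= 2"; an equivalent, unfolded sketch (hypothetical names):

    #include <stdio.h>

    #define RX_MIN_TARGET 8
    #define RX_MAX_TARGET 256

    /* Double the fill target when fewer than a quarter of 'target'
     * buffers were still queued, mirroring the check above. */
    static int adapt_target(int outstanding, int target)
    {
        if (outstanding < target / 4) {
            target *= 2;
            if (target > RX_MAX_TARGET)
                target = RX_MAX_TARGET;
        }
        return target;
    }

    int main(void)
    {
        int target = RX_MIN_TARGET;

        target = adapt_target(1, target);  /* nearly dry -> 16 */
        printf("%d\n", target);
        target = adapt_target(1, target);  /* nearly dry again -> 32 */
        printf("%d\n", target);
        return 0;
    }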
 
 
 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-    unsigned short id;
-    struct net_private *np = netdev_priv(dev);
-    netif_tx_request_t *tx;
-    NETIF_RING_IDX i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    grant_ref_t ref;
-    unsigned long mfn;
-#endif
-
-    if (unlikely(np->tx_full)) {
-        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
-        netif_stop_queue(dev);
-        goto drop;
-    }
-
-    if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
-                  PAGE_SIZE)) {
-        struct sk_buff *nskb;
-        if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
-            goto drop;
-        skb_put(nskb, skb->len);
-        memcpy(nskb->data, skb->data, skb->len);
-        nskb->dev = skb->dev;
-        dev_kfree_skb(skb);
-        skb = nskb;
-    }
+       unsigned short id;
+       struct net_private *np = netdev_priv(dev);
+       netif_tx_request_t *tx;
+       NETIF_RING_IDX i;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       grant_ref_t ref;
+       unsigned long mfn;
+#endif
+
+       if (unlikely(np->tx_full)) {
+               printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
+                      dev->name);
+               netif_stop_queue(dev);
+               goto drop;
+       }
+
+       if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
+                    PAGE_SIZE)) {
+               struct sk_buff *nskb;
+               if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
+                       goto drop;
+               skb_put(nskb, skb->len);
+               memcpy(nskb->data, skb->data, skb->len);
+               nskb->dev = skb->dev;
+               dev_kfree_skb(skb);
+               skb = nskb;
+       }
     
-    spin_lock_irq(&np->tx_lock);
-
-    if (np->backend_state != BEST_CONNECTED) {
-        spin_unlock_irq(&np->tx_lock);
-        goto drop;
-    }
-
-    i = np->tx->req_prod;
-
-    id = GET_ID_FROM_FREELIST(np->tx_skbs);
-    np->tx_skbs[id] = skb;
-
-    tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
-
-    tx->id   = id;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    ref = gnttab_claim_grant_reference(&gref_tx_head);
-    if (unlikely((signed short)ref < 0)) {
-        printk(KERN_ALERT "#### netfront can't claim tx grant reference\n");
-        BUG();
-    }
-    mfn = virt_to_mfn(skb->data);
-    gnttab_grant_foreign_access_ref(ref, np->backend_id, mfn, GNTMAP_readonly);
-    tx->addr = ref << PAGE_SHIFT;
-    grant_tx_ref[id] = ref;
+       spin_lock_irq(&np->tx_lock);
+
+       if (np->backend_state != BEST_CONNECTED) {
+               spin_unlock_irq(&np->tx_lock);
+               goto drop;
+       }
+
+       i = np->tx->req_prod;
+
+       id = GET_ID_FROM_FREELIST(np->tx_skbs);
+       np->tx_skbs[id] = skb;
+
+       tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
+
+       tx->id   = id;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+       BUG_ON((signed short)ref < 0);
+       mfn = virt_to_mfn(skb->data);
+       gnttab_grant_foreign_access_ref(
+               ref, np->backend_id, mfn, GNTMAP_readonly);
+       tx->addr = ref << PAGE_SHIFT;
+       np->grant_tx_ref[id] = ref;
 #else
-    tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
-#endif
-    tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
-    tx->size = skb->len;
-    tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
-
-    wmb(); /* Ensure that backend will see the request. */
-    np->tx->req_prod = i + 1;
-
-    network_tx_buf_gc(dev);
-
-    if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
-        np->tx_full = 1;
-        netif_stop_queue(dev);
-    }
-
-    spin_unlock_irq(&np->tx_lock);
-
-    np->stats.tx_bytes += skb->len;
-    np->stats.tx_packets++;
-
-    /* Only notify Xen if we really have to. */
-    mb();
-    if (np->tx->TX_TEST_IDX == i)
-        notify_via_evtchn(np->evtchn);
-
-    return 0;
+       tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
+#endif
+       tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
+       tx->size = skb->len;
+       tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
+
+       wmb(); /* Ensure that backend will see the request. */
+       np->tx->req_prod = i + 1;
+
+       network_tx_buf_gc(dev);
+
+       if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
+               np->tx_full = 1;
+               netif_stop_queue(dev);
+       }
+
+       spin_unlock_irq(&np->tx_lock);
+
+       np->stats.tx_bytes += skb->len;
+       np->stats.tx_packets++;
+
+       /* Only notify Xen if we really have to. */
+       mb();
+       if (np->tx->TX_TEST_IDX == i)
+               notify_via_evtchn(np->evtchn);
+
+       return 0;
 
  drop:
-    np->stats.tx_dropped++;
-    dev_kfree_skb(skb);
-    return 0;
+       np->stats.tx_dropped++;
+       dev_kfree_skb(skb);
+       return 0;
 }
 
 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
 {
-    struct net_device *dev = dev_id;
-    struct net_private *np = netdev_priv(dev);
-    unsigned long flags;
-
-    spin_lock_irqsave(&np->tx_lock, flags);
-    network_tx_buf_gc(dev);
-    spin_unlock_irqrestore(&np->tx_lock, flags);
-
-    if((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
-        netif_rx_schedule(dev);
-
-    return IRQ_HANDLED;
+       struct net_device *dev = dev_id;
+       struct net_private *np = netdev_priv(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&np->tx_lock, flags);
+       network_tx_buf_gc(dev);
+       spin_unlock_irqrestore(&np->tx_lock, flags);
+
+       if ((np->rx_resp_cons != np->rx->resp_prod) &&
+           (np->user_state == UST_OPEN))
+               netif_rx_schedule(dev);
+
+       return IRQ_HANDLED;
 }
 
 
 static int netif_poll(struct net_device *dev, int *pbudget)
 {
-    struct net_private *np = netdev_priv(dev);
-    struct sk_buff *skb, *nskb;
-    netif_rx_response_t *rx;
-    NETIF_RING_IDX i, rp;
-    mmu_update_t *mmu = rx_mmu;
-    multicall_entry_t *mcl = rx_mcl;
-    int work_done, budget, more_to_do = 1;
-    struct sk_buff_head rxq;
-    unsigned long flags;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    unsigned long mfn;
-    grant_ref_t ref;
-#endif
-
-    spin_lock(&np->rx_lock);
-
-    if (np->backend_state != BEST_CONNECTED) {
-        spin_unlock(&np->rx_lock);
-        return 0;
-    }
-
-    skb_queue_head_init(&rxq);
-
-    if ((budget = *pbudget) > dev->quota)
-        budget = dev->quota;
-    rp = np->rx->resp_prod;
-    rmb(); /* Ensure we see queued responses up to 'rp'. */
-
-    for (i = np->rx_resp_cons, work_done = 0; 
-                   (i != rp) && (work_done < budget);
-                   i++, work_done++) {
-        rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
-        /*
-         * An error here is very odd. Usually indicates a backend bug,
-         * low-memory condition, or that we didn't have reservation headroom.
-         */
-        if (unlikely(rx->status <= 0)) {
-            if (net_ratelimit())
-                printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
-            np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
-            wmb();
-            np->rx->req_prod++;
-            work_done--;
-            continue;
-        }
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        ref = grant_rx_ref[rx->id]; 
-
-        if(ref == GRANT_INVALID_REF) { 
-            printk(KERN_WARNING "Bad rx grant reference %d from dom %d.\n",
-                   ref, np->backend_id);
-            np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
-            wmb();
-            np->rx->req_prod++;
-            work_done--;
-            continue;
-        }
-
-        grant_rx_ref[rx->id] = GRANT_INVALID_REF;
-        mfn = gnttab_end_foreign_transfer_ref(ref);
-        gnttab_release_grant_reference(&gref_rx_head, ref);
-#endif
-
-        skb = np->rx_skbs[rx->id];
-        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
-
-        /* NB. We handle skb overflow later. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        skb->data = skb->head + rx->addr;
+       struct net_private *np = netdev_priv(dev);
+       struct sk_buff *skb, *nskb;
+       netif_rx_response_t *rx;
+       NETIF_RING_IDX i, rp;
+       mmu_update_t *mmu = rx_mmu;
+       multicall_entry_t *mcl = rx_mcl;
+       int work_done, budget, more_to_do = 1;
+       struct sk_buff_head rxq;
+       unsigned long flags;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       unsigned long mfn;
+       grant_ref_t ref;
+#endif
+
+       spin_lock(&np->rx_lock);
+
+       if (np->backend_state != BEST_CONNECTED) {
+               spin_unlock(&np->rx_lock);
+               return 0;
+       }
+
+       skb_queue_head_init(&rxq);
+
+       if ((budget = *pbudget) > dev->quota)
+               budget = dev->quota;
+       rp = np->rx->resp_prod;
+       rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+       for (i = np->rx_resp_cons, work_done = 0; 
+            (i != rp) && (work_done < budget);
+            i++, work_done++) {
+               rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+               /*
+                * An error here is very odd. Usually indicates a backend bug,
+                * low-mem condition, or we didn't have reservation headroom.
+                */
+               if (unlikely(rx->status <= 0)) {
+                       if (net_ratelimit())
+                               printk(KERN_WARNING "Bad rx buffer "
+                                      "(memory squeeze?).\n");
+                       np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
+                               req.id = rx->id;
+                       wmb();
+                       np->rx->req_prod++;
+                       work_done--;
+                       continue;
+               }
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               ref = np->grant_rx_ref[rx->id]; 
+
+               if (ref == GRANT_INVALID_REF) {
+                       printk(KERN_WARNING "Bad rx grant reference %d "
+                              "from dom %d.\n",
+                              ref, np->backend_id);
+                       np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
+                               req.id = rx->id;
+                       wmb();
+                       np->rx->req_prod++;
+                       work_done--;
+                       continue;
+               }
+
+               np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
+               mfn = gnttab_end_foreign_transfer_ref(ref);
+               gnttab_release_grant_reference(&np->gref_rx_head, ref);
+#endif
+
+               skb = np->rx_skbs[rx->id];
+               ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
+
+               /* NB. We handle skb overflow later. */
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               skb->data = skb->head + rx->addr;
 #else
-        skb->data = skb->head + (rx->addr & ~PAGE_MASK);
-#endif
-        skb->len  = rx->status;
-        skb->tail = skb->data + skb->len;
-
-        if ( rx->csum_valid )
-            skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-        np->stats.rx_packets++;
-        np->stats.rx_bytes += rx->status;
-
-        /* Remap the page. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        mmu->ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+               skb->data = skb->head + (rx->addr & ~PAGE_MASK);
+#endif
+               skb->len  = rx->status;
+               skb->tail = skb->data + skb->len;
+
+               if ( rx->csum_valid )
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               np->stats.rx_packets++;
+               np->stats.rx_bytes += rx->status;
+
+               /* Remap the page. */
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
 #else
-        mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
-#endif
-        mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
-        mmu++;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-       MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
-                               pfn_pte_ma(mfn, PAGE_KERNEL), 0);
+               mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
+#endif
+               mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
+               mmu++;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
+                                       pfn_pte_ma(mfn, PAGE_KERNEL), 0);
 #else
-       MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
-                               pfn_pte_ma(rx->addr >> PAGE_SHIFT, 
-                                           PAGE_KERNEL), 0);
-#endif
-        mcl++;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
-        GDPRINTK("#### rx_poll     enqueue vdata=%p mfn=%lu ref=%x\n",
-                skb->data, mfn, ref);
+               MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
+                                       pfn_pte_ma(rx->addr >> PAGE_SHIFT, 
+                                                  PAGE_KERNEL), 0);
+#endif
+               mcl++;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
 #else
-        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
-            rx->addr >> PAGE_SHIFT;
+               phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
+                       rx->addr >> PAGE_SHIFT;
 #endif 
 
 
-        __skb_queue_tail(&rxq, skb);
-    }
-
-
-    /* Some pages are no longer absent... */
-    balloon_update_driver_allowance(-work_done);
-
-    /* Do all the remapping work, and M->P updates, in one big hypercall. */
-    if (likely((mcl - rx_mcl) != 0)) {
-        mcl->op = __HYPERVISOR_mmu_update;
-        mcl->args[0] = (unsigned long)rx_mmu;
-        mcl->args[1] = mmu - rx_mmu;
-        mcl->args[2] = 0;
-        mcl->args[3] = DOMID_SELF;
-        mcl++;
-        (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
-    }
-
-    while ((skb = __skb_dequeue(&rxq)) != NULL) {
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        GDPRINTK("#### rx_poll     dequeue vdata=%p mfn=%lu\n",
-                skb->data, virt_to_mfn(skb->data));
-        dump_packet('d', skb->data, (unsigned long)skb->data);
-#endif
-        /*
-         * Enough room in skbuff for the data we were passed? Also, Linux 
-         * expects at least 16 bytes headroom in each receive buffer.
-         */
-        if (unlikely(skb->tail > skb->end) || 
-                       unlikely((skb->data - skb->head) < 16)) {
-            nskb = NULL;
-
-
-            /* Only copy the packet if it fits in the current MTU. */
-            if (skb->len <= (dev->mtu + ETH_HLEN)) {
-                if ((skb->tail > skb->end) && net_ratelimit())
-                    printk(KERN_INFO "Received packet needs %zd bytes more "
-                           "headroom.\n", skb->tail - skb->end);
-
-                if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
-                    skb_reserve(nskb, 2);
-                    skb_put(nskb, skb->len);
-                    memcpy(nskb->data, skb->data, skb->len);
-                    nskb->dev = skb->dev;
-                }
-            }
-            else if (net_ratelimit())
-                printk(KERN_INFO "Received packet too big for MTU "
-                       "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
-
-            /* Reinitialise and then destroy the old skbuff. */
-            skb->len  = 0;
-            skb->tail = skb->data;
-            init_skb_shinfo(skb);
-            dev_kfree_skb(skb);
-
-            /* Switch old for new, if we copied the buffer. */
-            if ((skb = nskb) == NULL)
-                continue;
-        }
+               __skb_queue_tail(&rxq, skb);
+       }
+
+       /* Some pages are no longer absent... */
+       balloon_update_driver_allowance(-work_done);
+
+       /* Do all the remapping work, and M2P updates, in one big hypercall. */
+       if (likely((mcl - rx_mcl) != 0)) {
+               mcl->op = __HYPERVISOR_mmu_update;
+               mcl->args[0] = (unsigned long)rx_mmu;
+               mcl->args[1] = mmu - rx_mmu;
+               mcl->args[2] = 0;
+               mcl->args[3] = DOMID_SELF;
+               mcl++;
+               (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
+       }
+
+       while ((skb = __skb_dequeue(&rxq)) != NULL) {
+               /*
+                * Enough room in skbuff for the data we were passed? Also,
+                * Linux expects at least 16 bytes headroom in each rx buffer.
+                */
+               if (unlikely(skb->tail > skb->end) || 
+                   unlikely((skb->data - skb->head) < 16)) {
+                       nskb = NULL;
+
+                       /* Only copy the packet if it fits in the MTU. */
+                       if (skb->len <= (dev->mtu + ETH_HLEN)) {
+                               if ((skb->tail > skb->end) && net_ratelimit())
+                                       printk(KERN_INFO "Received packet "
+                                              "needs %zd bytes more "
+                                              "headroom.\n",
+                                              skb->tail - skb->end);
+
+                               nskb = alloc_xen_skb(skb->len + 2);
+                               if (nskb != NULL) {
+                                       skb_reserve(nskb, 2);
+                                       skb_put(nskb, skb->len);
+                                       memcpy(nskb->data,
+                                              skb->data,
+                                              skb->len);
+                                       nskb->dev = skb->dev;
+                               }
+                       }
+                       else if (net_ratelimit())
+                               printk(KERN_INFO "Received packet too big for "
+                                      "MTU (%d > %d)\n",
+                                      skb->len - ETH_HLEN, dev->mtu);
+
+                       /* Reinitialise and then destroy the old skbuff. */
+                       skb->len  = 0;
+                       skb->tail = skb->data;
+                       init_skb_shinfo(skb);
+                       dev_kfree_skb(skb);
+
+                       /* Switch old for new, if we copied the buffer. */
+                       if ((skb = nskb) == NULL)
+                               continue;
+               }
         
-        /* Set the shared-info area, which is hidden behind the real data. */
-        init_skb_shinfo(skb);
-        /* Ethernet-specific work. Delayed to here as it peeks the header. */
-        skb->protocol = eth_type_trans(skb, dev);
-
-        /* Pass it up. */
-        netif_receive_skb(skb);
-        dev->last_rx = jiffies;
-    }
-
-    np->rx_resp_cons = i;
-
-    /* If we get a callback with very few responses, reduce fill target. */
-    /* NB. Note exponential increase, linear decrease. */
-    if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
-         (--np->rx_target < np->rx_min_target))
-        np->rx_target = np->rx_min_target;
-
-    network_alloc_rx_buffers(dev);
-
-    *pbudget   -= work_done;
-    dev->quota -= work_done;
-
-    if (work_done < budget) {
-        local_irq_save(flags);
-
-        np->rx->event = i + 1;
+               /* Set the shinfo area, which is hidden behind the data. */
+               init_skb_shinfo(skb);
+               /* Ethernet work: Delayed to here as it peeks the header. */
+               skb->protocol = eth_type_trans(skb, dev);
+
+               /* Pass it up. */
+               netif_receive_skb(skb);
+               dev->last_rx = jiffies;
+       }
+
+       np->rx_resp_cons = i;
+
+       /* If we get a callback with very few responses, reduce fill target. */
+       /* NB. Note exponential increase, linear decrease. */
+       if (((np->rx->req_prod - np->rx->resp_prod) >
+            ((3*np->rx_target) / 4)) &&
+           (--np->rx_target < np->rx_min_target))
+               np->rx_target = np->rx_min_target;
+
+       network_alloc_rx_buffers(dev);
+
+       *pbudget   -= work_done;
+       dev->quota -= work_done;
+
+       if (work_done < budget) {
+               local_irq_save(flags);
+
+               np->rx->event = i + 1;
     
-        /* Deal with hypervisor racing our resetting of rx_event. */
-        mb();
-        if (np->rx->resp_prod == i) {
-            __netif_rx_complete(dev);
-            more_to_do = 0;
-        }
-
-        local_irq_restore(flags);
-    }
-
-    spin_unlock(&np->rx_lock);
-
-    return more_to_do;
+               /* Deal with hypervisor racing our resetting of rx_event. */
+               mb();
+               if (np->rx->resp_prod == i) {
+                       __netif_rx_complete(dev);
+                       more_to_do = 0;
+               }
+
+               local_irq_restore(flags);
+       }
+
+       spin_unlock(&np->rx_lock);
+
+       return more_to_do;
 }
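
The poll loop above is the consumer half of the split ring protocol: snapshot
the backend's resp_prod, issue rmb() before reading responses up to that
snapshot, and, when requeueing a bad buffer, publish the request slot with
wmb() before advancing req_prod. A minimal, self-contained sketch of that
discipline — demo_ring, demo_resp, handle_resp and DEMO_RING_SIZE are invented
names for illustration, not the netfront types:

#define DEMO_RING_SIZE 256	/* assumed power of two */

struct demo_resp {
	int id;
	int status;
};

struct demo_ring {
	volatile unsigned int req_prod;		/* advanced by frontend */
	volatile unsigned int resp_prod;	/* advanced by backend  */
	struct demo_resp resp[DEMO_RING_SIZE];
};

extern void handle_resp(struct demo_resp *rsp);

static void demo_consume(struct demo_ring *r, unsigned int *resp_cons)
{
	unsigned int i, rp = r->resp_prod;	/* snapshot the producer */

	rmb();	/* see all responses up to 'rp' before touching them */

	for (i = *resp_cons; i != rp; i++)
		handle_resp(&r->resp[i & (DEMO_RING_SIZE - 1)]);

	*resp_cons = i;	/* unsigned indices wrap modulo 2^32 by design */
}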
 
 
 static int network_close(struct net_device *dev)
 {
-    struct net_private *np = netdev_priv(dev);
-    np->user_state = UST_CLOSED;
-    netif_stop_queue(np->netdev);
-    return 0;
+       struct net_private *np = netdev_priv(dev);
+       np->user_state = UST_CLOSED;
+       netif_stop_queue(np->netdev);
+       return 0;
 }
 
 
 static struct net_device_stats *network_get_stats(struct net_device *dev)
 {
-    struct net_private *np = netdev_priv(dev);
-    return &np->stats;
+       struct net_private *np = netdev_priv(dev);
+       return &np->stats;
 }
 
 static void network_connect(struct net_device *dev)
 {
-    struct net_private *np;
-    int i, requeue_idx;
-    netif_tx_request_t *tx;
-
-    np = netdev_priv(dev);
-    spin_lock_irq(&np->tx_lock);
-    spin_lock(&np->rx_lock);
-
-    /* Recovery procedure: */
-
-    /* Step 1: Reinitialise variables. */
-    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
-    np->rx->event = np->tx->event = 1;
-
-    /* Step 2: Rebuild the RX and TX ring contents.
-     * NB. We could just free the queued TX packets now but we hope
-     * that sending them out might do some good.  We have to rebuild
-     * the RX ring because some of our pages are currently flipped out
-     * so we can't just free the RX skbs.
-     * NB2. Freelist index entries are always going to be less than
-     *  __PAGE_OFFSET, whereas pointers to skbs will always be equal or
-     * greater than __PAGE_OFFSET: we use this property to distinguish
-     * them.
-     */
-
-    /* Rebuild the TX buffer freelist and the TX ring itself.
-     * NB. This reorders packets.  We could keep more private state
-     * to avoid this but maybe it doesn't matter so much given the
-     * interface has been down.
-     */
-    for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
-        if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
-            struct sk_buff *skb = np->tx_skbs[i];
-
-            tx = &np->tx->ring[requeue_idx++].req;
-
-            tx->id   = i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-            gnttab_grant_foreign_access_ref(grant_tx_ref[i], np->backend_id, 
-                                            virt_to_mfn(np->tx_skbs[i]->data),
-                                            GNTMAP_readonly); 
-            tx->addr = grant_tx_ref[i] << PAGE_SHIFT; 
+       struct net_private *np;
+       int i, requeue_idx;
+       netif_tx_request_t *tx;
+
+       np = netdev_priv(dev);
+       spin_lock_irq(&np->tx_lock);
+       spin_lock(&np->rx_lock);
+
+       /* Recovery procedure: */
+
+       /* Step 1: Reinitialise variables. */
+       np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
+       np->rx->event = np->tx->event = 1;
+
+       /* Step 2: Rebuild the RX and TX ring contents.
+        * NB. We could just free the queued TX packets now but we hope
+        * that sending them out might do some good.  We have to rebuild
+        * the RX ring because some of our pages are currently flipped out
+        * so we can't just free the RX skbs.
+        * NB2. Freelist index entries are always going to be less than
+        *  __PAGE_OFFSET, whereas pointers to skbs will always be equal or
+        * greater than __PAGE_OFFSET: we use this property to distinguish
+        * them.
+        */
+
+       /* Rebuild the TX buffer freelist and the TX ring itself.
+        * NB. This reorders packets.  We could keep more private state
+        * to avoid this but maybe it doesn't matter so much given the
+        * interface has been down.
+        */
+       for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
+               if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
+                       struct sk_buff *skb = np->tx_skbs[i];
+
+                       tx = &np->tx->ring[requeue_idx++].req;
+
+                       tx->id   = i;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+                       gnttab_grant_foreign_access_ref(
+                               np->grant_tx_ref[i], np->backend_id, 
+                               virt_to_mfn(np->tx_skbs[i]->data),
+                               GNTMAP_readonly); 
+                       tx->addr = np->grant_tx_ref[i] << PAGE_SHIFT; 
 #else
-            tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
-#endif
-            tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
-            tx->size = skb->len;
-
-            np->stats.tx_bytes += skb->len;
-            np->stats.tx_packets++;
-        }
-    }
-    wmb();
-    np->tx->req_prod = requeue_idx;
-
-    /* Rebuild the RX buffer freelist and the RX ring itself. */
-    for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
-        if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
+                       tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
+#endif
+                       tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
+                       tx->size = skb->len;
+
+                       np->stats.tx_bytes += skb->len;
+                       np->stats.tx_packets++;
+               }
+       }
+       wmb();
+       np->tx->req_prod = requeue_idx;
+
+       /* Rebuild the RX buffer freelist and the RX ring itself. */
+       for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
+               if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
 #ifdef CONFIG_XEN_NETDEV_GRANT 
-            /* Reinstate the grant ref so backend can transfer mfn to us. */
-            gnttab_grant_foreign_transfer_ref(grant_rx_ref[i], np->backend_id);
-            np->rx->ring[requeue_idx].req.gref = grant_rx_ref[i];
-#endif
-            np->rx->ring[requeue_idx].req.id   = i;
-            requeue_idx++; 
-        }
-    }
-
-    wmb();                
-    np->rx->req_prod = requeue_idx;
-
-    /* Step 3: All public and private state should now be sane.  Get
-     * ready to start sending and receiving packets and give the driver
-     * domain a kick because we've probably just requeued some
-     * packets.
-     */
-    np->backend_state = BEST_CONNECTED;
-    wmb();
-    notify_via_evtchn(np->evtchn);  
-    network_tx_buf_gc(dev);
-
-    if (np->user_state == UST_OPEN)
-        netif_start_queue(dev);
-
-    spin_unlock(&np->rx_lock);
-    spin_unlock_irq(&np->tx_lock);
+                       gnttab_grant_foreign_transfer_ref(
+                               np->grant_rx_ref[i], np->backend_id);
+                       np->rx->ring[requeue_idx].req.gref =
+                               np->grant_rx_ref[i];
+#endif
+                       np->rx->ring[requeue_idx].req.id = i;
+                       requeue_idx++; 
+               }
+       }
+
+       wmb();                
+       np->rx->req_prod = requeue_idx;
+
+       /* Step 3: All public and private state should now be sane.  Get
+        * ready to start sending and receiving packets and give the driver
+        * domain a kick because we've probably just requeued some
+        * packets.
+        */
+       np->backend_state = BEST_CONNECTED;
+       wmb();
+       notify_via_evtchn(np->evtchn);  
+       network_tx_buf_gc(dev);
+
+       if (np->user_state == UST_OPEN)
+               netif_start_queue(dev);
+
+       spin_unlock(&np->rx_lock);
+       spin_unlock_irq(&np->tx_lock);
 }
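
The requeue loops above depend on the freelist encoding described in the NB2
comment: an entry in tx_skbs[]/rx_skbs[] is either a small integer naming the
next free slot (always below __PAGE_OFFSET) or a live skb pointer (always at
or above __PAGE_OFFSET). A hedged sketch of that encoding — fl_put, fl_get and
slot_in_use are invented helpers standing in for the driver's
ADD_ID_TO_FREELIST-style macros:

/* Slot 0 anchors the chain; slots 1..N hold either the index of the
 * next free slot or a pointer to an in-flight skb. */
static void fl_put(void **list, unsigned long id)
{
	list[id] = list[0];			/* chain the old head */
	list[0]  = (void *)id;
}

static unsigned long fl_get(void **list)
{
	unsigned long id = (unsigned long)list[0];	/* assumes non-empty */
	list[0] = list[id];				/* pop */
	return id;
}

static int slot_in_use(void **list, unsigned long i)
{
	/* Kernel pointers live at or above __PAGE_OFFSET; indices don't. */
	return (unsigned long)list[i] >= __PAGE_OFFSET;
}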
 
 static void show_device(struct net_private *np)
@@ -887,6 +860,15 @@
        show_device(np);
 }
 
+static void netif_uninit(struct net_device *dev)
+{
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       struct net_private *np = netdev_priv(dev);
+       gnttab_free_grant_references(np->gref_tx_head);
+       gnttab_free_grant_references(np->gref_rx_head);
+#endif
+}
+
 static struct ethtool_ops network_ethtool_ops =
 {
        .get_tx_csum = ethtool_op_get_tx_csum,
@@ -901,84 +883,107 @@
 static int create_netdev(int handle, struct xenbus_device *dev,
                         struct net_device **val)
 {
-    int i, err = 0;
-    struct net_device *netdev = NULL;
-    struct net_private *np = NULL;
-
-    if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
-        printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
-        err = -ENOMEM;
-        goto exit;
-    }
-
-    np                = netdev_priv(netdev);
-    np->backend_state = BEST_CLOSED;
-    np->user_state    = UST_CLOSED;
-    np->handle        = handle;
-    np->xbdev         = dev;
+       int i, err = 0;
+       struct net_device *netdev = NULL;
+       struct net_private *np = NULL;
+
+       if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
+               printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
+                      __FUNCTION__);
+               err = -ENOMEM;
+               goto exit;
+       }
+
+       np                = netdev_priv(netdev);
+       np->backend_state = BEST_CLOSED;
+       np->user_state    = UST_CLOSED;
+       np->handle        = handle;
+       np->xbdev         = dev;
     
-    spin_lock_init(&np->tx_lock);
-    spin_lock_init(&np->rx_lock);
-
-    skb_queue_head_init(&np->rx_batch);
-    np->rx_target     = RX_MIN_TARGET;
-    np->rx_min_target = RX_MIN_TARGET;
-    np->rx_max_target = RX_MAX_TARGET;
-
-    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
-    for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
-        np->tx_skbs[i] = (void *)((unsigned long) i+1);
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        grant_tx_ref[i] = GRANT_INVALID_REF;
-#endif
-    }
-
-    for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
-        np->rx_skbs[i] = (void *)((unsigned long) i+1);
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        grant_rx_ref[i] = GRANT_INVALID_REF;
-#endif
-    }
-
-    netdev->open            = network_open;
-    netdev->hard_start_xmit = network_start_xmit;
-    netdev->stop            = network_close;
-    netdev->get_stats       = network_get_stats;
-    netdev->poll            = netif_poll;
-    netdev->weight          = 64;
-    netdev->features        = NETIF_F_IP_CSUM;
-
-    SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
-
-    if ((err = register_netdev(netdev)) != 0) {
-        printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
-        goto exit;
-    }
-
-    if ((err = xennet_proc_addif(netdev)) != 0) {
-        unregister_netdev(netdev);
-        goto exit;
-    }
-
-    np->netdev = netdev;
-
-  exit:
-    if ((err != 0) && (netdev != NULL))
-        kfree(netdev);
-    else if (val != NULL)
-        *val = netdev;
-    return err;
+       spin_lock_init(&np->tx_lock);
+       spin_lock_init(&np->rx_lock);
+
+       skb_queue_head_init(&np->rx_batch);
+       np->rx_target     = RX_MIN_TARGET;
+       np->rx_min_target = RX_MIN_TARGET;
+       np->rx_max_target = RX_MAX_TARGET;
+
+       /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
+       for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
+               np->tx_skbs[i] = (void *)((unsigned long) i+1);
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               np->grant_tx_ref[i] = GRANT_INVALID_REF;
+#endif
+       }
+
+       for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
+               np->rx_skbs[i] = (void *)((unsigned long) i+1);
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               np->grant_rx_ref[i] = GRANT_INVALID_REF;
+#endif
+       }
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       /* A grant for every tx ring slot */
+       if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
+                                         &np->gref_tx_head) < 0) {
+               printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
+               goto exit;
+       }
+       /* A grant for every rx ring slot */
+       if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
+                                         &np->gref_rx_head) < 0) {
+               printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
+               gnttab_free_grant_references(np->gref_tx_head);
+               goto exit;
+       }
+#endif
+
+       netdev->open            = network_open;
+       netdev->hard_start_xmit = network_start_xmit;
+       netdev->stop            = network_close;
+       netdev->get_stats       = network_get_stats;
+       netdev->poll            = netif_poll;
+       netdev->uninit          = netif_uninit;
+       netdev->weight          = 64;
+       netdev->features        = NETIF_F_IP_CSUM;
+
+       SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
+
+       if ((err = register_netdev(netdev)) != 0) {
+               printk(KERN_WARNING "%s> register_netdev err=%d\n",
+                      __FUNCTION__, err);
+               goto exit_free_grefs;
+       }
+
+       if ((err = xennet_proc_addif(netdev)) != 0) {
+               unregister_netdev(netdev);
+               goto exit_free_grefs;
+       }
+
+       np->netdev = netdev;
+
+ exit:
+       if ((err != 0) && (netdev != NULL))
+               kfree(netdev);
+       else if (val != NULL)
+               *val = netdev;
+       return err;
+
+ exit_free_grefs:
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       gnttab_free_grant_references(np->gref_tx_head);
+       gnttab_free_grant_references(np->gref_rx_head);
+#endif
+       goto exit;
 }
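
create_netdev() now reserves one grant reference per ring slot for each
device, and netif_uninit() (wired up as netdev->uninit above) returns them
when the interface is torn down. A minimal sketch of that pairing, using only
the gnttab calls visible in this patch; demo_grant_setup/demo_grant_teardown
are invented names and error handling is trimmed to the essentials:

static int demo_grant_setup(struct net_private *np)
{
	/* One reference per tx slot, then one per rx slot. */
	if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
					  &np->gref_tx_head) < 0)
		return -ENOSPC;
	if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
					  &np->gref_rx_head) < 0) {
		gnttab_free_grant_references(np->gref_tx_head);
		return -ENOSPC;
	}
	return 0;
}

static void demo_grant_teardown(struct net_private *np)
{
	/* Mirrors netif_uninit(): both pools go back to the table. */
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}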
 
 static int destroy_netdev(struct net_device *netdev)
 {
-
 #ifdef CONFIG_PROC_FS
        xennet_proc_delif(netdev);
 #endif
-
         unregister_netdev(netdev);
-
        return 0;
 }
 
@@ -989,20 +994,20 @@
 static int 
 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
 {
-    struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
-    struct net_device *dev = ifa->ifa_dev->dev;
-
-    /* UP event and is it one of our devices? */
-    if (event == NETDEV_UP && dev->open == network_open)
-        (void)send_fake_arp(dev);
+       struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
+       struct net_device *dev = ifa->ifa_dev->dev;
+
+       /* UP event and is it one of our devices? */
+       if (event == NETDEV_UP && dev->open == network_open)
+               (void)send_fake_arp(dev);
         
-    return NOTIFY_DONE;
+       return NOTIFY_DONE;
 }
 
 static struct notifier_block notifier_inetdev = {
-    .notifier_call  = inetdev_notify,
-    .next           = NULL,
-    .priority       = 0
+       .notifier_call  = inetdev_notify,
+       .next           = NULL,
+       .priority       = 0
 };
 
 static struct xenbus_device_id netfront_ids[] = {
@@ -1341,72 +1346,50 @@
 
 static int wait_for_netif(void)
 {
-    int err = 0;
-    int i;
-
-    /*
-     * We should figure out how many and which devices we need to
-     * proceed and only wait for those.  For now, continue once the
-     * first device is around.
-     */
-    for ( i=0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++ )
-    {
-        set_current_state(TASK_INTERRUPTIBLE);
-        schedule_timeout(1);
-    }
-
-    if (netif_state != NETIF_STATE_CONNECTED) {
-        WPRINTK("Timeout connecting to device!\n");
-        err = -ENOSYS;
-    }
-    return err;
+       int err = 0;
+       int i;
+
+       /*
+        * We should figure out how many and which devices we need to
+        * proceed and only wait for those.  For now, continue once the
+        * first device is around.
+        */
+       for ( i=0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++ )
+       {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(1);
+       }
+
+       if (netif_state != NETIF_STATE_CONNECTED) {
+               WPRINTK("Timeout connecting to device!\n");
+               err = -ENOSYS;
+       }
+       return err;
 }
 
 static int __init netif_init(void)
 {
-    int err = 0;
-
-    if (xen_start_info->flags & SIF_INITDOMAIN)
-        return 0;
-
-    if ((err = xennet_proc_init()) != 0)
-        return err;
-
-    IPRINTK("Initialising virtual ethernet driver.\n");
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    IPRINTK("Using grant tables.\n"); 
-
-    /* A grant for every tx ring slot */
-    if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
-                                      &gref_tx_head) < 0) {
-        printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
-        return 1;
-    }
-    /* A grant for every rx ring slot */
-    if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
-                                      &gref_rx_head) < 0) {
-        printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
-        return 1;
-    }
-#endif
-
-
-    (void)register_inetaddr_notifier(&notifier_inetdev);
-
-    init_net_xenbus();
-
-    wait_for_netif();
-
-    return err;
+       int err = 0;
+
+       if (xen_start_info->flags & SIF_INITDOMAIN)
+               return 0;
+
+       if ((err = xennet_proc_init()) != 0)
+               return err;
+
+       IPRINTK("Initialising virtual ethernet driver.\n");
+
+       (void)register_inetaddr_notifier(&notifier_inetdev);
+
+       init_net_xenbus();
+
+       wait_for_netif();
+
+       return err;
 }
 
 static void netif_exit(void)
 {
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    gnttab_free_grant_references(gref_tx_head);
-    gnttab_free_grant_references(gref_rx_head);
-#endif
 }
 
 #ifdef CONFIG_PROC_FS
@@ -1416,147 +1399,159 @@
 #define TARGET_CUR 2UL
 
 static int xennet_proc_read(
-    char *page, char **start, off_t off, int count, int *eof, void *data)
-{
-    struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
-    struct net_private *np = netdev_priv(dev);
-    int len = 0, which_target = (long)data & 3;
+       char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+       struct net_device *dev =
+               (struct net_device *)((unsigned long)data & ~3UL);
+       struct net_private *np = netdev_priv(dev);
+       int len = 0, which_target = (long)data & 3;
     
-    switch (which_target)
-    {
-    case TARGET_MIN:
-        len = sprintf(page, "%d\n", np->rx_min_target);
-        break;
-    case TARGET_MAX:
-        len = sprintf(page, "%d\n", np->rx_max_target);
-        break;
-    case TARGET_CUR:
-        len = sprintf(page, "%d\n", np->rx_target);
-        break;
-    }
-
-    *eof = 1;
-    return len;
+       switch (which_target)
+       {
+       case TARGET_MIN:
+               len = sprintf(page, "%d\n", np->rx_min_target);
+               break;
+       case TARGET_MAX:
+               len = sprintf(page, "%d\n", np->rx_max_target);
+               break;
+       case TARGET_CUR:
+               len = sprintf(page, "%d\n", np->rx_target);
+               break;
+       }
+
+       *eof = 1;
+       return len;
 }
 
 static int xennet_proc_write(
-    struct file *file, const char __user *buffer,
-    unsigned long count, void *data)
-{
-    struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
-    struct net_private *np = netdev_priv(dev);
-    int which_target = (long)data & 3;
-    char string[64];
-    long target;
-
-    if (!capable(CAP_SYS_ADMIN))
-        return -EPERM;
-
-    if (count <= 1)
-        return -EBADMSG; /* runt */
-    if (count > sizeof(string))
-        return -EFBIG;   /* too long */
-
-    if (copy_from_user(string, buffer, count))
-        return -EFAULT;
-    string[sizeof(string)-1] = '\0';
-
-    target = simple_strtol(string, NULL, 10);
-    if (target < RX_MIN_TARGET)
-        target = RX_MIN_TARGET;
-    if (target > RX_MAX_TARGET)
-        target = RX_MAX_TARGET;
-
-    spin_lock(&np->rx_lock);
-
-    switch (which_target)
-    {
-    case TARGET_MIN:
-        if (target > np->rx_max_target)
-            np->rx_max_target = target;
-        np->rx_min_target = target;
-        if (target > np->rx_target)
-            np->rx_target = target;
-        break;
-    case TARGET_MAX:
-        if (target < np->rx_min_target)
-            np->rx_min_target = target;
-        np->rx_max_target = target;
-        if (target < np->rx_target)
-            np->rx_target = target;
-        break;
-    case TARGET_CUR:
-        break;
-    }
-
-    network_alloc_rx_buffers(dev);
-
-    spin_unlock(&np->rx_lock);
-
-    return count;
+       struct file *file, const char __user *buffer,
+       unsigned long count, void *data)
+{
+       struct net_device *dev =
+               (struct net_device *)((unsigned long)data & ~3UL);
+       struct net_private *np = netdev_priv(dev);
+       int which_target = (long)data & 3;
+       char string[64];
+       long target;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (count <= 1)
+               return -EBADMSG; /* runt */
+       if (count > sizeof(string))
+               return -EFBIG;   /* too long */
+
+       if (copy_from_user(string, buffer, count))
+               return -EFAULT;
+       string[sizeof(string)-1] = '\0';
+
+       target = simple_strtol(string, NULL, 10);
+       if (target < RX_MIN_TARGET)
+               target = RX_MIN_TARGET;
+       if (target > RX_MAX_TARGET)
+               target = RX_MAX_TARGET;
+
+       spin_lock(&np->rx_lock);
+
+       switch (which_target)
+       {
+       case TARGET_MIN:
+               if (target > np->rx_max_target)
+                       np->rx_max_target = target;
+               np->rx_min_target = target;
+               if (target > np->rx_target)
+                       np->rx_target = target;
+               break;
+       case TARGET_MAX:
+               if (target < np->rx_min_target)
+                       np->rx_min_target = target;
+               np->rx_max_target = target;
+               if (target < np->rx_target)
+                       np->rx_target = target;
+               break;
+       case TARGET_CUR:
+               break;
+       }
+
+       network_alloc_rx_buffers(dev);
+
+       spin_unlock(&np->rx_lock);
+
+       return count;
 }
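
xennet_proc_read() and xennet_proc_write() both unpack a single 'data' cookie
into a device pointer plus a selector: a net_device allocation is at least
word-aligned, so the low two bits of its address are zero and can carry
TARGET_MIN/TARGET_MAX/TARGET_CUR. A standalone sketch of the tag arithmetic
(tag_ptr, untag_ptr and ptr_tag are invented names):

#define TAG_MASK 3UL	/* two low bits spare in an aligned pointer */

static inline void *tag_ptr(struct net_device *dev, unsigned long tag)
{
	return (void *)((unsigned long)dev | (tag & TAG_MASK));
}

static inline struct net_device *untag_ptr(void *data)
{
	return (struct net_device *)((unsigned long)data & ~TAG_MASK);
}

static inline unsigned long ptr_tag(void *data)
{
	return (unsigned long)data & TAG_MASK;
}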
 
 static int xennet_proc_init(void)
 {
-    if (proc_mkdir("xen/net", NULL) == NULL)
-        return -ENOMEM;
-    return 0;
+       if (proc_mkdir("xen/net", NULL) == NULL)
+               return -ENOMEM;
+       return 0;
 }
 
 static int xennet_proc_addif(struct net_device *dev)
 {
-    struct proc_dir_entry *dir, *min, *max, *cur;
-    char name[30];
-
-    sprintf(name, "xen/net/%s", dev->name);
-
-    dir = proc_mkdir(name, NULL);
-    if (!dir)
-        goto nomem;
-
-    min = create_proc_entry("rxbuf_min", 0644, dir);
-    max = create_proc_entry("rxbuf_max", 0644, dir);
-    cur = create_proc_entry("rxbuf_cur", 0444, dir);
-    if (!min || !max || !cur)
-        goto nomem;
-
-    min->read_proc  = xennet_proc_read;
-    min->write_proc = xennet_proc_write;
-    min->data       = (void *)((unsigned long)dev | TARGET_MIN);
-
-    max->read_proc  = xennet_proc_read;
-    max->write_proc = xennet_proc_write;
-    max->data       = (void *)((unsigned long)dev | TARGET_MAX);
-
-    cur->read_proc  = xennet_proc_read;
-    cur->write_proc = xennet_proc_write;
-    cur->data       = (void *)((unsigned long)dev | TARGET_CUR);
-
-    return 0;
+       struct proc_dir_entry *dir, *min, *max, *cur;
+       char name[30];
+
+       sprintf(name, "xen/net/%s", dev->name);
+
+       dir = proc_mkdir(name, NULL);
+       if (!dir)
+               goto nomem;
+
+       min = create_proc_entry("rxbuf_min", 0644, dir);
+       max = create_proc_entry("rxbuf_max", 0644, dir);
+       cur = create_proc_entry("rxbuf_cur", 0444, dir);
+       if (!min || !max || !cur)
+               goto nomem;
+
+       min->read_proc  = xennet_proc_read;
+       min->write_proc = xennet_proc_write;
+       min->data       = (void *)((unsigned long)dev | TARGET_MIN);
+
+       max->read_proc  = xennet_proc_read;
+       max->write_proc = xennet_proc_write;
+       max->data       = (void *)((unsigned long)dev | TARGET_MAX);
+
+       cur->read_proc  = xennet_proc_read;
+       cur->write_proc = xennet_proc_write;
+       cur->data       = (void *)((unsigned long)dev | TARGET_CUR);
+
+       return 0;
 
  nomem:
-    xennet_proc_delif(dev);
-    return -ENOMEM;
+       xennet_proc_delif(dev);
+       return -ENOMEM;
 }
 
 static void xennet_proc_delif(struct net_device *dev)
 {
-    char name[30];
-
-    sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
-    remove_proc_entry(name, NULL);
-
-    sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
-    remove_proc_entry(name, NULL);
-
-    sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
-    remove_proc_entry(name, NULL);
-
-    sprintf(name, "xen/net/%s", dev->name);
-    remove_proc_entry(name, NULL);
+       char name[30];
+
+       sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
+       remove_proc_entry(name, NULL);
+
+       sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
+       remove_proc_entry(name, NULL);
+
+       sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
+       remove_proc_entry(name, NULL);
+
+       sprintf(name, "xen/net/%s", dev->name);
+       remove_proc_entry(name, NULL);
 }
 
 #endif
 
 module_init(netif_init);
 module_exit(netif_exit);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
--- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Fri Sep 16 18:07:50 2005
@@ -130,12 +130,12 @@
                 if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
                     return -EINVAL;
 
-                if ( (rc = direct_remap_pfn_range(vma->vm_mm, 
-                                                   msg[j].va&PAGE_MASK, 
-                                                   msg[j].mfn, 
-                                                   msg[j].npages<<PAGE_SHIFT, 
-                                                   vma->vm_page_prot,
-                                                   mmapcmd.dom)) < 0 )
+                if ( (rc = direct_remap_pfn_range(vma,
+                                                  msg[j].va&PAGE_MASK, 
+                                                  msg[j].mfn, 
+                                                  msg[j].npages<<PAGE_SHIFT, 
+                                                  vma->vm_page_prot,
+                                                  mmapcmd.dom)) < 0 )
                     return rc;
             }
         }
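
After this change every caller hands direct_remap_pfn_range() the VMA itself
rather than vma->vm_mm, so the remap path can record mapping attributes on the
VMA before installing PTEs. A hypothetical driver mmap handler against the new
prototype (declared in the pgtable.h hunks below); mydev_base_mfn is an
invented helper, not part of this tree:

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long mfn = mydev_base_mfn(file);	/* assumed helper */

	return direct_remap_pfn_range(vma, vma->vm_start, mfn,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot, DOMID_IO);
}
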
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h   Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h   Fri Sep 16 18:07:50 2005
@@ -460,7 +460,7 @@
 #define kern_addr_valid(addr)  (1)
 #endif /* !CONFIG_DISCONTIGMEM */
 
-int direct_remap_pfn_range(struct mm_struct *mm,
+int direct_remap_pfn_range(struct vm_area_struct *vma,
                             unsigned long address, 
                             unsigned long mfn,
                             unsigned long size, 
@@ -474,10 +474,10 @@
                     unsigned long size);
 
 #define io_remap_page_range(vma,from,phys,size,prot) \
-direct_remap_pfn_range(vma->vm_mm,from,phys>>PAGE_SHIFT,size,prot,DOMID_IO)
+direct_remap_pfn_range(vma,from,(phys)>>PAGE_SHIFT,size,prot,DOMID_IO)
 
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
-direct_remap_pfn_range(vma->vm_mm,from,pfn,size,prot,DOMID_IO)
+direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
 
 #define MK_IOSPACE_PFN(space, pfn)     (pfn)
 #define GET_IOSPACE(pfn)               0
diff -r 7cccdb49af75 -r ffbc98d735bd linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Fri Sep 16 18:06:42 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Fri Sep 16 18:07:50 2005
@@ -526,7 +526,7 @@
 
 #define DOMID_LOCAL (0xFFFFU)
 
-int direct_remap_pfn_range(struct mm_struct *mm,
+int direct_remap_pfn_range(struct vm_area_struct *vma,
                             unsigned long address,
                             unsigned long mfn,
                             unsigned long size,
@@ -542,10 +542,10 @@
                     unsigned long size);
 
 #define io_remap_page_range(vma, vaddr, paddr, size, prot)             \
-               direct_remap_pfn_range((vma)->vm_mm,vaddr,paddr>>PAGE_SHIFT,size,prot,DOMID_IO)
+               direct_remap_pfn_range(vma,vaddr,(paddr)>>PAGE_SHIFT,size,prot,DOMID_IO)
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               direct_remap_pfn_range((vma)->vm_mm,vaddr,pfn,size,prot,DOMID_IO)
+               direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
 
 #define MK_IOSPACE_PFN(space, pfn)     (pfn)
 #define GET_IOSPACE(pfn)               0

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
