[Xen-changelog] Clean up and re-indent netback driver.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Clean up and re-indent netback driver.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 16 Sep 2005 13:28:11 +0000
Delivery-date: Fri, 16 Sep 2005 13:26:39 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 7fbaf67a0af5048f94640ea0e44b88408b21b791
# Parent  8bb3f2567b8c5c4b0d93dd1a8e2ee5ea0ea9f103
Clean up and re-indent netback driver.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
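
The change is almost entirely mechanical: it re-indents the netback driver from 4-space indentation to the kernel's tab-based, 8-column style (and, in most places, drops the space padding inside if-conditions), then appends an Emacs "Local variables" block to each file so editors keep it that way. A minimal illustration of the convention being applied, simplified from the hunks below:

/* Old style: 4-space indents, padded conditionals. */
if ( netif->status == CONNECTED )
    __netif_up(netif);

/* New style: one tab per level, no parenthesis padding. */
if (netif->status == CONNECTED)
	__netif_up(netif);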

diff -r 8bb3f2567b8c -r 7fbaf67a0af5 linux-2.6-xen-sparse/drivers/xen/netback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h Fri Sep 16 13:06:49 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Fri Sep 16 13:27:01 2005
@@ -18,16 +18,10 @@
 #include <asm-xen/xen-public/io/netif.h>
 #include <asm/io.h>
 #include <asm/pgalloc.h>
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
 #include <asm-xen/xen-public/grant_table.h>
 #include <asm-xen/gnttab.h>
 
 #define GRANT_INVALID_REF (0xFFFF)
-
-#endif
-
-
 
 #if 0
 #define ASSERT(_p) \
@@ -44,74 +38,73 @@
 #define WPRINTK(fmt, args...) \
     printk(KERN_WARNING "xen_net: " fmt, ##args)
 
+typedef struct netif_st {
+       /* Unique identifier for this interface. */
+       domid_t          domid;
+       unsigned int     handle;
 
-typedef struct netif_st {
-    /* Unique identifier for this interface. */
-    domid_t          domid;
-    unsigned int     handle;
+       u8               fe_dev_addr[6];
 
-    u8               fe_dev_addr[6];
+       /* Physical parameters of the comms window. */
+       unsigned long    tx_shmem_frame;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       u16              tx_shmem_handle;
+       unsigned long    tx_shmem_vaddr; 
+       grant_ref_t      tx_shmem_ref; 
+#endif
+       unsigned long    rx_shmem_frame;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       u16              rx_shmem_handle;
+       unsigned long    rx_shmem_vaddr; 
+       grant_ref_t      rx_shmem_ref; 
+#endif
+       unsigned int     evtchn;
+       unsigned int     remote_evtchn;
 
-    /* Physical parameters of the comms window. */
-    unsigned long    tx_shmem_frame;
+       /* The shared rings and indexes. */
+       netif_tx_interface_t *tx;
+       netif_rx_interface_t *rx;
+
+       /* Private indexes into shared ring. */
+       NETIF_RING_IDX rx_req_cons;
+       NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    u16              tx_shmem_handle;
-    unsigned long    tx_shmem_vaddr; 
-    grant_ref_t      tx_shmem_ref; 
+       NETIF_RING_IDX rx_resp_prod_copy;
 #endif
-    unsigned long    rx_shmem_frame;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    u16              rx_shmem_handle;
-    unsigned long    rx_shmem_vaddr; 
-    grant_ref_t      rx_shmem_ref; 
-#endif
-    unsigned int     evtchn;
-    unsigned int     remote_evtchn;
+       NETIF_RING_IDX tx_req_cons;
+       NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
 
-    /* The shared rings and indexes. */
-    netif_tx_interface_t *tx;
-    netif_rx_interface_t *rx;
+       /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+       unsigned long   credit_bytes;
+       unsigned long   credit_usec;
+       unsigned long   remaining_credit;
+       struct timer_list credit_timeout;
 
-    /* Private indexes into shared ring. */
-    NETIF_RING_IDX rx_req_cons;
-    NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    NETIF_RING_IDX rx_resp_prod_copy; /* private version of shared variable */
-#endif
-    NETIF_RING_IDX tx_req_cons;
-    NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
+       /* Miscellaneous private stuff. */
+       enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+       int active;
+       struct list_head list;  /* scheduling list */
+       atomic_t         refcnt;
+       struct net_device *dev;
+       struct net_device_stats stats;
 
-    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-    unsigned long   credit_bytes;
-    unsigned long   credit_usec;
-    unsigned long   remaining_credit;
-    struct timer_list credit_timeout;
-
-    /* Miscellaneous private stuff. */
-    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-    int active;
-    struct list_head list;  /* scheduling list */
-    atomic_t         refcnt;
-    struct net_device *dev;
-    struct net_device_stats stats;
-
-    struct work_struct free_work;
+       struct work_struct free_work;
 } netif_t;
 
 void netif_creditlimit(netif_t *netif);
 int  netif_disconnect(netif_t *netif);
 
 netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]);
-void free_netif_callback(netif_t *netif);
+void free_netif(netif_t *netif);
 int netif_map(netif_t *netif, unsigned long tx_ring_ref,
              unsigned long rx_ring_ref, unsigned int evtchn);
 
 #define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define netif_put(_b)                             \
-    do {                                          \
-        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-            free_netif_callback(_b);              \
-    } while (0)
+#define netif_put(_b)                                          \
+       do {                                                    \
+               if ( atomic_dec_and_test(&(_b)->refcnt) )       \
+                       free_netif(_b);                         \
+       } while (0)
 
 void netif_xenbus_init(void);
 
@@ -123,3 +116,13 @@
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 
 #endif /* __NETIF__BACKEND__COMMON_H__ */
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
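
Aside from whitespace, the visible change in common.h is a rename: the deferred destructor becomes free_netif_callback(), and the public entry point invoked by netif_put() when the refcount hits zero becomes free_netif(). The idiom is to defer the real teardown to a workqueue, because the final put may happen in a context that cannot sleep. A self-contained sketch of that idiom, using the 2.6-era three-argument INIT_WORK(); the obj_* names are illustrative, not from the driver:

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

struct obj {
	atomic_t refcnt;
	struct work_struct free_work;
};

/* Runs later in keventd's process context, where sleeping teardown
 * (unregister_netdev(), vfree(), ...) is safe. */
static void obj_free_callback(void *arg)
{
	kfree(arg);
}

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcnt)) {
		/* Last reference dropped: schedule the deferred free. */
		INIT_WORK(&o->free_work, obj_free_callback, o);
		schedule_work(&o->free_work);
	}
}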
diff -r 8bb3f2567b8c -r 7fbaf67a0af5 linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Fri Sep 16 13:06:49 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Fri Sep 16 13:27:01 2005
@@ -11,104 +11,105 @@
 
 static void __netif_up(netif_t *netif)
 {
-    struct net_device *dev = netif->dev;
-    spin_lock_bh(&dev->xmit_lock);
-    netif->active = 1;
-    spin_unlock_bh(&dev->xmit_lock);
-    (void)bind_evtchn_to_irqhandler(
-        netif->evtchn, netif_be_int, 0, dev->name, netif);
-    netif_schedule_work(netif);
+       struct net_device *dev = netif->dev;
+       spin_lock_bh(&dev->xmit_lock);
+       netif->active = 1;
+       spin_unlock_bh(&dev->xmit_lock);
+       (void)bind_evtchn_to_irqhandler(
+               netif->evtchn, netif_be_int, 0, dev->name, netif);
+       netif_schedule_work(netif);
 }
 
 static void __netif_down(netif_t *netif)
 {
-    struct net_device *dev = netif->dev;
-    spin_lock_bh(&dev->xmit_lock);
-    netif->active = 0;
-    spin_unlock_bh(&dev->xmit_lock);
-    unbind_evtchn_from_irqhandler(netif->evtchn, netif);
-    netif_deschedule_work(netif);
+       struct net_device *dev = netif->dev;
+       spin_lock_bh(&dev->xmit_lock);
+       netif->active = 0;
+       spin_unlock_bh(&dev->xmit_lock);
+       unbind_evtchn_from_irqhandler(netif->evtchn, netif);
+       netif_deschedule_work(netif);
 }
 
 static int net_open(struct net_device *dev)
 {
-    netif_t *netif = netdev_priv(dev);
-    if (netif->status == CONNECTED)
-        __netif_up(netif);
-    netif_start_queue(dev);
-    return 0;
+       netif_t *netif = netdev_priv(dev);
+       if (netif->status == CONNECTED)
+               __netif_up(netif);
+       netif_start_queue(dev);
+       return 0;
 }
 
 static int net_close(struct net_device *dev)
 {
-    netif_t *netif = netdev_priv(dev);
-    netif_stop_queue(dev);
-    if (netif->status == CONNECTED)
-        __netif_down(netif);
-    return 0;
+       netif_t *netif = netdev_priv(dev);
+       netif_stop_queue(dev);
+       if (netif->status == CONNECTED)
+               __netif_down(netif);
+       return 0;
 }
 
 netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN])
 {
-    int err = 0, i;
-    struct net_device *dev;
-    netif_t *netif;
-    char name[IFNAMSIZ] = {};
-
-    snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
-    dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
-    if (dev == NULL) {
-        DPRINTK("Could not create netif: out of memory\n");
-        return NULL;
-    }
-
-    netif = netdev_priv(dev);
-    memset(netif, 0, sizeof(*netif));
-    netif->domid  = domid;
-    netif->handle = handle;
-    netif->status = DISCONNECTED;
-    atomic_set(&netif->refcnt, 0);
-    netif->dev = dev;
-
-    netif->credit_bytes = netif->remaining_credit = ~0UL;
-    netif->credit_usec  = 0UL;
-    init_timer(&netif->credit_timeout);
-
-    dev->hard_start_xmit = netif_be_start_xmit;
-    dev->get_stats       = netif_be_get_stats;
-    dev->open            = net_open;
-    dev->stop            = net_close;
-    dev->features        = NETIF_F_NO_CSUM;
-
-    /* Disable queuing. */
-    dev->tx_queue_len = 0;
-
-    for (i = 0; i < ETH_ALEN; i++)
-       if (be_mac[i] != 0)
-           break;
-    if (i == ETH_ALEN) {
-        /*
-         * Initialise a dummy MAC address. We choose the numerically largest
-         * non-broadcast address to prevent the address getting stolen by an
-         * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
-         */ 
-        memset(dev->dev_addr, 0xFF, ETH_ALEN);
-        dev->dev_addr[0] &= ~0x01;
-    } else
-        memcpy(dev->dev_addr, be_mac, ETH_ALEN);
-
-    rtnl_lock();
-    err = register_netdevice(dev);
-    rtnl_unlock();
-    if (err) {
-        DPRINTK("Could not register new net device %s: err=%d\n",
-                dev->name, err);
-        free_netdev(dev);
-        return NULL;
-    }
-
-    DPRINTK("Successfully created netif\n");
-    return netif;
+       int err = 0, i;
+       struct net_device *dev;
+       netif_t *netif;
+       char name[IFNAMSIZ] = {};
+
+       snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+       dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
+       if (dev == NULL) {
+               DPRINTK("Could not create netif: out of memory\n");
+               return NULL;
+       }
+
+       netif = netdev_priv(dev);
+       memset(netif, 0, sizeof(*netif));
+       netif->domid  = domid;
+       netif->handle = handle;
+       netif->status = DISCONNECTED;
+       atomic_set(&netif->refcnt, 0);
+       netif->dev = dev;
+
+       netif->credit_bytes = netif->remaining_credit = ~0UL;
+       netif->credit_usec  = 0UL;
+       init_timer(&netif->credit_timeout);
+
+       dev->hard_start_xmit = netif_be_start_xmit;
+       dev->get_stats       = netif_be_get_stats;
+       dev->open            = net_open;
+       dev->stop            = net_close;
+       dev->features        = NETIF_F_NO_CSUM;
+
+       /* Disable queuing. */
+       dev->tx_queue_len = 0;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               if (be_mac[i] != 0)
+                       break;
+       if (i == ETH_ALEN) {
+               /*
+                * Initialise a dummy MAC address. We choose the numerically
+                * largest non-broadcast address to prevent the address getting
+                * stolen by an Ethernet bridge for STP purposes.
+                * (FE:FF:FF:FF:FF:FF)
+                */ 
+               memset(dev->dev_addr, 0xFF, ETH_ALEN);
+               dev->dev_addr[0] &= ~0x01;
+       } else
+               memcpy(dev->dev_addr, be_mac, ETH_ALEN);
+
+       rtnl_lock();
+       err = register_netdevice(dev);
+       rtnl_unlock();
+       if (err) {
+               DPRINTK("Could not register new net device %s: err=%d\n",
+                       dev->name, err);
+               free_netdev(dev);
+               return NULL;
+       }
+
+       DPRINTK("Successfully created netif\n");
+       return netif;
 }
 
 static int map_frontend_pages(netif_t *netif, unsigned long localaddr,
@@ -116,191 +117,204 @@
                               unsigned long rx_ring_ref)
 {
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    struct gnttab_map_grant_ref op;
-
-    /* Map: Use the Grant table reference */
-    op.host_addr = localaddr;
-    op.flags     = GNTMAP_host_map;
-    op.ref       = tx_ring_ref;
-    op.dom       = netif->domid;
+       struct gnttab_map_grant_ref op;
+
+       /* Map: Use the Grant table reference */
+       op.host_addr = localaddr;
+       op.flags     = GNTMAP_host_map;
+       op.ref       = tx_ring_ref;
+       op.dom       = netif->domid;
     
-    BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
-    if (op.handle < 0) { 
-        DPRINTK(" Grant table operation failure mapping tx_ring_ref!\n");
-        return op.handle;
-    }
-
-    netif->tx_shmem_ref    = tx_ring_ref;
-    netif->tx_shmem_handle = op.handle;
-    netif->tx_shmem_vaddr  = localaddr;
-
-    /* Map: Use the Grant table reference */
-    op.host_addr = localaddr + PAGE_SIZE;
-    op.flags     = GNTMAP_host_map;
-    op.ref       = rx_ring_ref;
-    op.dom       = netif->domid;
-
-    BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
-    if (op.handle < 0) { 
-        DPRINTK(" Grant table operation failure mapping rx_ring_ref!\n");
-        return op.handle;
-    }
-
-    netif->rx_shmem_ref    = rx_ring_ref;
-    netif->rx_shmem_handle = op.handle;
-    netif->rx_shmem_vaddr  = localaddr + PAGE_SIZE;
+       BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
+       if (op.handle < 0) { 
+               DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
+               return op.handle;
+       }
+
+       netif->tx_shmem_ref    = tx_ring_ref;
+       netif->tx_shmem_handle = op.handle;
+       netif->tx_shmem_vaddr  = localaddr;
+
+       /* Map: Use the Grant table reference */
+       op.host_addr = localaddr + PAGE_SIZE;
+       op.flags     = GNTMAP_host_map;
+       op.ref       = rx_ring_ref;
+       op.dom       = netif->domid;
+
+       BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
+       if (op.handle < 0) { 
+               DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
+               return op.handle;
+       }
+
+       netif->rx_shmem_ref    = rx_ring_ref;
+       netif->rx_shmem_handle = op.handle;
+       netif->rx_shmem_vaddr  = localaddr + PAGE_SIZE;
 
 #else
-    pgprot_t      prot = __pgprot(_KERNPG_TABLE);
-    int           err;
-
-    err = direct_remap_pfn_range(&init_mm, localaddr,
-                                 tx_ring_ref, PAGE_SIZE,
-                                 prot, netif->domid); 
+       pgprot_t prot = __pgprot(_KERNPG_TABLE);
+       int      err;
+
+       err = direct_remap_pfn_range(
+               &init_mm, localaddr,
+               tx_ring_ref, PAGE_SIZE,
+               prot, netif->domid); 
     
-    err |= direct_remap_pfn_range(&init_mm, localaddr + PAGE_SIZE,
-                                 rx_ring_ref, PAGE_SIZE,
-                                 prot, netif->domid);
-
-    if (err)
-       return err;
+       err |= direct_remap_pfn_range(
+               &init_mm, localaddr + PAGE_SIZE,
+               rx_ring_ref, PAGE_SIZE,
+               prot, netif->domid);
+
+       if (err)
+               return err;
 #endif
 
-    return 0;
+       return 0;
 }
 
 static void unmap_frontend_pages(netif_t *netif)
 {
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    struct gnttab_unmap_grant_ref op;
-
-    op.host_addr    = netif->tx_shmem_vaddr;
-    op.handle       = netif->tx_shmem_handle;
-    op.dev_bus_addr = 0;
-    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
-
-    op.host_addr    = netif->rx_shmem_vaddr;
-    op.handle       = netif->rx_shmem_handle;
-    op.dev_bus_addr = 0;
-    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+       struct gnttab_unmap_grant_ref op;
+
+       op.host_addr    = netif->tx_shmem_vaddr;
+       op.handle       = netif->tx_shmem_handle;
+       op.dev_bus_addr = 0;
+       BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
+
+       op.host_addr    = netif->rx_shmem_vaddr;
+       op.handle       = netif->rx_shmem_handle;
+       op.dev_bus_addr = 0;
+       BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
 #endif
 
-    return; 
+       return; 
 }
 
 int netif_map(netif_t *netif, unsigned long tx_ring_ref,
              unsigned long rx_ring_ref, unsigned int evtchn)
 {
-    struct vm_struct *vma;
-    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
-    int err;
-
-    vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP);
-    if (vma == NULL)
-        return -ENOMEM;
-
-    err = map_frontend_pages(netif, (unsigned long)vma->addr, tx_ring_ref,
-                             rx_ring_ref);
-    if (err) {
-        vfree(vma->addr);
-       return err;
-    }
-
-    op.u.bind_interdomain.dom1 = DOMID_SELF;
-    op.u.bind_interdomain.dom2 = netif->domid;
-    op.u.bind_interdomain.port1 = 0;
-    op.u.bind_interdomain.port2 = evtchn;
-    err = HYPERVISOR_event_channel_op(&op);
-    if (err) {
-       unmap_frontend_pages(netif);
-       vfree(vma->addr);
-       return err;
-    }
-
-    netif->evtchn = op.u.bind_interdomain.port1;
-    netif->remote_evtchn = evtchn;
-
-    netif->tx = (netif_tx_interface_t *)vma->addr;
-    netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
-    netif->tx->resp_prod = netif->rx->resp_prod = 0;
-    netif_get(netif);
-    wmb(); /* Other CPUs see new state before interface is started. */
-
-    rtnl_lock();
-    netif->status = CONNECTED;
-    wmb();
-    if (netif_running(netif->dev))
-        __netif_up(netif);
-    rtnl_unlock();
-
-    return 0;
-}
-
-static void free_netif(void *arg)
-{
-    evtchn_op_t op = { .cmd = EVTCHNOP_close };
-    netif_t *netif = (netif_t *)arg;
-
-    /*
-     * These can't be done in netif_disconnect() because at that point there
-     * may be outstanding requests in the network stack whose asynchronous
-     * responses must still be notified to the remote driver.
-     */
-
-    op.u.close.port = netif->evtchn;
-    op.u.close.dom = DOMID_SELF;
-    HYPERVISOR_event_channel_op(&op);
-    op.u.close.port = netif->remote_evtchn;
-    op.u.close.dom = netif->domid;
-    HYPERVISOR_event_channel_op(&op);
-
-    unregister_netdev(netif->dev);
-
-    if (netif->tx) {
-       unmap_frontend_pages(netif);
-       vfree(netif->tx); /* Frees netif->rx as well. */
-    }
-
-    free_netdev(netif->dev);
-}
-
-void free_netif_callback(netif_t *netif)
-{
-    INIT_WORK(&netif->free_work, free_netif, (void *)netif);
-    schedule_work(&netif->free_work);
+       struct vm_struct *vma;
+       evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
+       int err;
+
+       vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP);
+       if (vma == NULL)
+               return -ENOMEM;
+
+       err = map_frontend_pages(
+               netif, (unsigned long)vma->addr, tx_ring_ref, rx_ring_ref);
+       if (err) {
+               vfree(vma->addr);
+               return err;
+       }
+
+       op.u.bind_interdomain.dom1 = DOMID_SELF;
+       op.u.bind_interdomain.dom2 = netif->domid;
+       op.u.bind_interdomain.port1 = 0;
+       op.u.bind_interdomain.port2 = evtchn;
+       err = HYPERVISOR_event_channel_op(&op);
+       if (err) {
+               unmap_frontend_pages(netif);
+               vfree(vma->addr);
+               return err;
+       }
+
+       netif->evtchn = op.u.bind_interdomain.port1;
+       netif->remote_evtchn = evtchn;
+
+       netif->tx = (netif_tx_interface_t *)vma->addr;
+       netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
+       netif->tx->resp_prod = netif->rx->resp_prod = 0;
+       netif_get(netif);
+       wmb(); /* Other CPUs see new state before interface is started. */
+
+       rtnl_lock();
+       netif->status = CONNECTED;
+       wmb();
+       if (netif_running(netif->dev))
+               __netif_up(netif);
+       rtnl_unlock();
+
+       return 0;
+}
+
+static void free_netif_callback(void *arg)
+{
+       evtchn_op_t op = { .cmd = EVTCHNOP_close };
+       netif_t *netif = (netif_t *)arg;
+
+       /*
+        * These can't be done in netif_disconnect() because at that point
+        * there may be outstanding requests in the network stack whose
+        * asynchronous responses must still be notified to the remote driver.
+        */
+
+       op.u.close.port = netif->evtchn;
+       op.u.close.dom = DOMID_SELF;
+       HYPERVISOR_event_channel_op(&op);
+       op.u.close.port = netif->remote_evtchn;
+       op.u.close.dom = netif->domid;
+       HYPERVISOR_event_channel_op(&op);
+
+       unregister_netdev(netif->dev);
+
+       if (netif->tx) {
+               unmap_frontend_pages(netif);
+               vfree(netif->tx); /* Frees netif->rx as well. */
+       }
+
+       free_netdev(netif->dev);
+}
+
+void free_netif(netif_t *netif)
+{
+       INIT_WORK(&netif->free_work, free_netif_callback, (void *)netif);
+       schedule_work(&netif->free_work);
 }
 
 void netif_creditlimit(netif_t *netif)
 {
 #if 0
-    /* Set the credit limit (reset remaining credit to new limit). */
-    netif->credit_bytes = netif->remaining_credit = creditlimit->credit_bytes;
-    netif->credit_usec = creditlimit->period_usec;
-
-    if (netif->status == CONNECTED) {
-        /*
-         * Schedule work so that any packets waiting under previous credit 
-         * limit are dealt with (acts like a replenishment point).
-         */
-        netif->credit_timeout.expires = jiffies;
-        netif_schedule_work(netif);
-    }
+       /* Set the credit limit (reset remaining credit to new limit). */
+       netif->credit_bytes     = creditlimit->credit_bytes;
+       netif->remaining_credit = creditlimit->credit_bytes;
+       netif->credit_usec      = creditlimit->period_usec;
+
+       if (netif->status == CONNECTED) {
+               /*
+                * Schedule work so that any packets waiting under previous
+                * credit limit are dealt with (acts as a replenishment point).
+                */
+               netif->credit_timeout.expires = jiffies;
+               netif_schedule_work(netif);
+       }
 #endif
 }
 
 int netif_disconnect(netif_t *netif)
 {
 
-    if (netif->status == CONNECTED) {
-        rtnl_lock();
-        netif->status = DISCONNECTING;
-        wmb();
-        if (netif_running(netif->dev))
-            __netif_down(netif);
-        rtnl_unlock();
-        netif_put(netif);
-        return 0; /* Caller should not send response message. */
-    }
-
-    return 1;
-}
+       if (netif->status == CONNECTED) {
+               rtnl_lock();
+               netif->status = DISCONNECTING;
+               wmb();
+               if (netif_running(netif->dev))
+                       __netif_down(netif);
+               rtnl_unlock();
+               netif_put(netif);
+               return 0; /* Caller should not send response message. */
+       }
+
+       return 1;
+}
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
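
One detail worth noting in the reformatted alloc_netif() above: when no MAC is supplied, the backend synthesizes FE:FF:FF:FF:FF:FF by starting from the broadcast address and clearing the group bit, which gives the numerically largest unicast address. The Linux bridge selects the numerically smallest port MAC as its bridge ID, so a vif carrying this address can never have its address adopted for STP purposes. As a standalone sketch (assuming ETH_ALEN is 6):

unsigned char addr[ETH_ALEN];

memset(addr, 0xFF, ETH_ALEN);   /* FF:FF:FF:FF:FF:FF (broadcast) */
addr[0] &= ~0x01;               /* clear the group/multicast bit */
/* addr is now FE:FF:FF:FF:FF:FF: a valid unicast address that is
 * larger than any real port MAC, so the bridge never adopts it. */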
diff -r 8bb3f2567b8c -r 7fbaf67a0af5 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Fri Sep 16 13:06:49 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Fri Sep 16 13:27:01 2005
@@ -57,8 +57,8 @@
 #define PKT_PROT_LEN 64
 
 static struct {
-    netif_tx_request_t req;
-    netif_t *netif;
+       netif_tx_request_t req;
+       netif_t *netif;
 } pending_tx_info[MAX_PENDING_REQS];
 static u16 pending_ring[MAX_PENDING_REQS];
 typedef unsigned int PEND_RING_IDX;
@@ -91,49 +91,49 @@
 
 static unsigned long alloc_mfn(void)
 {
-    unsigned long mfn = 0, flags;
-    struct xen_memory_reservation reservation = {
-        .extent_start = mfn_list,
-        .nr_extents   = MAX_MFN_ALLOC,
-        .extent_order = 0,
-        .domid        = DOMID_SELF
-    };
-    spin_lock_irqsave(&mfn_lock, flags);
-    if ( unlikely(alloc_index == 0) )
-        alloc_index = HYPERVISOR_memory_op(
-            XENMEM_increase_reservation, &reservation);
-    if ( alloc_index != 0 )
-        mfn = mfn_list[--alloc_index];
-    spin_unlock_irqrestore(&mfn_lock, flags);
-    return mfn;
+       unsigned long mfn = 0, flags;
+       struct xen_memory_reservation reservation = {
+               .extent_start = mfn_list,
+               .nr_extents   = MAX_MFN_ALLOC,
+               .extent_order = 0,
+               .domid        = DOMID_SELF
+       };
+       spin_lock_irqsave(&mfn_lock, flags);
+       if ( unlikely(alloc_index == 0) )
+               alloc_index = HYPERVISOR_memory_op(
+                       XENMEM_increase_reservation, &reservation);
+       if ( alloc_index != 0 )
+               mfn = mfn_list[--alloc_index];
+       spin_unlock_irqrestore(&mfn_lock, flags);
+       return mfn;
 }
 
 #ifndef CONFIG_XEN_NETDEV_GRANT
 static void free_mfn(unsigned long mfn)
 {
-    unsigned long flags;
-    struct xen_memory_reservation reservation = {
-        .extent_start = &mfn,
-        .nr_extents   = 1,
-        .extent_order = 0,
-        .domid        = DOMID_SELF
-    };
-    spin_lock_irqsave(&mfn_lock, flags);
-    if ( alloc_index != MAX_MFN_ALLOC )
-        mfn_list[alloc_index++] = mfn;
-    else if ( HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation)
-              != 1 )
-        BUG();
-    spin_unlock_irqrestore(&mfn_lock, flags);
+       unsigned long flags;
+       struct xen_memory_reservation reservation = {
+               .extent_start = &mfn,
+               .nr_extents   = 1,
+               .extent_order = 0,
+               .domid        = DOMID_SELF
+       };
+       spin_lock_irqsave(&mfn_lock, flags);
+       if ( alloc_index != MAX_MFN_ALLOC )
+               mfn_list[alloc_index++] = mfn;
+       else
+               BUG_ON(HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                           &reservation) != 1);
+       spin_unlock_irqrestore(&mfn_lock, flags);
 }
 #endif
 
 static inline void maybe_schedule_tx_action(void)
 {
-    smp_mb();
-    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
-         !list_empty(&net_schedule_list) )
-        tasklet_schedule(&net_tx_tasklet);
+       smp_mb();
+       if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+           !list_empty(&net_schedule_list))
+               tasklet_schedule(&net_tx_tasklet);
 }
 
 /*
@@ -142,77 +142,77 @@
  */
 static inline int is_xen_skb(struct sk_buff *skb)
 {
-    extern kmem_cache_t *skbuff_cachep;
-    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
-    return (cp == skbuff_cachep);
+       extern kmem_cache_t *skbuff_cachep;
+       kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
+       return (cp == skbuff_cachep);
 }
 
 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-    netif_t *netif = netdev_priv(dev);
-
-    ASSERT(skb->dev == dev);
-
-    /* Drop the packet if the target domain has no receive buffers. */
-    if ( !netif->active || 
-         (netif->rx_req_cons == netif->rx->req_prod) ||
-         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
-        goto drop;
-
-    /*
-     * We do not copy the packet unless:
-     *  1. The data is shared; or
-     *  2. The data is not allocated from our special cache.
-     * NB. We also couldn't cope with fragmented packets, but we won't get
-     *     any because we not advertise the NETIF_F_SG feature.
-     */
-    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
-    {
-        int hlen = skb->data - skb->head;
-        struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
-        if ( unlikely(nskb == NULL) )
-            goto drop;
-        skb_reserve(nskb, hlen);
-        __skb_put(nskb, skb->len);
-        if (skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen))
-            BUG();
-        nskb->dev = skb->dev;
-        nskb->proto_csum_valid = skb->proto_csum_valid;
-        dev_kfree_skb(skb);
-        skb = nskb;
-    }
+       netif_t *netif = netdev_priv(dev);
+
+       ASSERT(skb->dev == dev);
+
+       /* Drop the packet if the target domain has no receive buffers. */
+       if (!netif->active || 
+           (netif->rx_req_cons == netif->rx->req_prod) ||
+           ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE))
+               goto drop;
+
+       /*
+        * We do not copy the packet unless:
+        *  1. The data is shared; or
+        *  2. The data is not allocated from our special cache.
+        * NB. We also couldn't cope with fragmented packets, but we won't get
+        *     any because we do not advertise the NETIF_F_SG feature.
+        */
+       if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
+               int hlen = skb->data - skb->head;
+               struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
+               if ( unlikely(nskb == NULL) )
+                       goto drop;
+               skb_reserve(nskb, hlen);
+               __skb_put(nskb, skb->len);
+               BUG_ON(skb_copy_bits(skb, -hlen, nskb->data - hlen,
+                                    skb->len + hlen));
+               nskb->dev = skb->dev;
+               nskb->proto_csum_valid = skb->proto_csum_valid;
+               dev_kfree_skb(skb);
+               skb = nskb;
+       }
 #ifdef CONFIG_XEN_NETDEV_GRANT
 #ifdef DEBUG_GRANT
-    printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d id=%04x gr=%04x\n",
-           netif->rx->req_prod,
-           netif->rx_req_cons,
-           netif->rx->ring[
-                  MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
-           netif->rx->ring[
-                  MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
-#endif
-#endif
-    netif->rx_req_cons++;
-    netif_get(netif);
-
-    skb_queue_tail(&rx_queue, skb);
-    tasklet_schedule(&net_rx_tasklet);
-
-    return 0;
+       printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d "
+              "id=%04x gr=%04x\n",
+              netif->rx->req_prod,
+              netif->rx_req_cons,
+              netif->rx->ring[
+                      MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
+              netif->rx->ring[
+                      MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
+#endif
+#endif
+       netif->rx_req_cons++;
+       netif_get(netif);
+
+       skb_queue_tail(&rx_queue, skb);
+       tasklet_schedule(&net_rx_tasklet);
+
+       return 0;
 
  drop:
-    netif->stats.tx_dropped++;
-    dev_kfree_skb(skb);
-    return 0;
+       netif->stats.tx_dropped++;
+       dev_kfree_skb(skb);
+       return 0;
 }
 
 #if 0
 static void xen_network_done_notify(void)
 {
-    static struct net_device *eth0_dev = NULL;
-    if ( unlikely(eth0_dev == NULL) )
-        eth0_dev = __dev_get_by_name("eth0");
-    netif_rx_schedule(eth0_dev);
+       static struct net_device *eth0_dev = NULL;
+       if (unlikely(eth0_dev == NULL))
+               eth0_dev = __dev_get_by_name("eth0");
+       netif_rx_schedule(eth0_dev);
 }
 /* 
  * Add following to poll() function in NAPI driver (Tigon3 is example):
@@ -221,658 +221,654 @@
  */
 int xen_network_done(void)
 {
-    return skb_queue_empty(&rx_queue);
+       return skb_queue_empty(&rx_queue);
 }
 #endif
 
 static void net_rx_action(unsigned long unused)
 {
-    netif_t *netif = NULL; 
-    s8 status;
-    u16 size, id, evtchn;
-    multicall_entry_t *mcl;
-    mmu_update_t *mmu;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    gnttab_transfer_t *gop;
-#else
-    struct mmuext_op *mmuext;
-#endif
-    unsigned long vdata, old_mfn, new_mfn;
-    struct sk_buff_head rxq;
-    struct sk_buff *skb;
-    u16 notify_list[NETIF_RX_RING_SIZE];
-    int notify_nr = 0;
-
-    skb_queue_head_init(&rxq);
-
-    mcl = rx_mcl;
-    mmu = rx_mmu;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    gop = grant_rx_op;
-#else
-    mmuext = rx_mmuext;
-#endif
-
-    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
-    {
-        netif   = netdev_priv(skb->dev);
-        vdata   = (unsigned long)skb->data;
-        old_mfn = virt_to_mfn(vdata);
-
-        /* Memory squeeze? Back off for an arbitrary while. */
-        if ( (new_mfn = alloc_mfn()) == 0 )
-        {
-            if ( net_ratelimit() )
-                WPRINTK("Memory squeeze in netback driver.\n");
-            mod_timer(&net_timer, jiffies + HZ);
-            skb_queue_head(&rx_queue, skb);
-            break;
-        }
-        /*
-         * Set the new P2M table entry before reassigning the old data page.
-         * Heed the comment in pgtable-2level.h:pte_page(). :-)
-         */
-        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
-
-        MULTI_update_va_mapping(mcl, vdata,
-                               pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
-        mcl++;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        gop->mfn = old_mfn;
-        gop->domid = netif->domid;
-        gop->ref = netif->rx->ring[
-        MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
-        netif->rx_resp_prod_copy++;
-        gop++;
-#else
-        mcl->op = __HYPERVISOR_mmuext_op;
-        mcl->args[0] = (unsigned long)mmuext;
-        mcl->args[1] = 1;
-        mcl->args[2] = 0;
-        mcl->args[3] = netif->domid;
-        mcl++;
-
-        mmuext->cmd = MMUEXT_REASSIGN_PAGE;
-        mmuext->arg1.mfn = old_mfn;
-        mmuext++;
-#endif
-        mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-        mmu->val = __pa(vdata) >> PAGE_SHIFT;  
-        mmu++;
-
-        __skb_queue_tail(&rxq, skb);
+       netif_t *netif = NULL; 
+       s8 status;
+       u16 size, id, evtchn;
+       multicall_entry_t *mcl;
+       mmu_update_t *mmu;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       gnttab_transfer_t *gop;
+#else
+       struct mmuext_op *mmuext;
+#endif
+       unsigned long vdata, old_mfn, new_mfn;
+       struct sk_buff_head rxq;
+       struct sk_buff *skb;
+       u16 notify_list[NETIF_RX_RING_SIZE];
+       int notify_nr = 0;
+
+       skb_queue_head_init(&rxq);
+
+       mcl = rx_mcl;
+       mmu = rx_mmu;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       gop = grant_rx_op;
+#else
+       mmuext = rx_mmuext;
+#endif
+
+       while ((skb = skb_dequeue(&rx_queue)) != NULL) {
+               netif   = netdev_priv(skb->dev);
+               vdata   = (unsigned long)skb->data;
+               old_mfn = virt_to_mfn(vdata);
+
+               /* Memory squeeze? Back off for an arbitrary while. */
+               if ((new_mfn = alloc_mfn()) == 0) {
+                       if ( net_ratelimit() )
+                               WPRINTK("Memory squeeze in netback driver.\n");
+                       mod_timer(&net_timer, jiffies + HZ);
+                       skb_queue_head(&rx_queue, skb);
+                       break;
+               }
+               /*
+                * Set the new P2M table entry before reassigning the old data
+                * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
+                */
+               phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] =
+                       new_mfn;
+
+               MULTI_update_va_mapping(mcl, vdata,
+                                       pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
+               mcl++;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               gop->mfn = old_mfn;
+               gop->domid = netif->domid;
+               gop->ref = netif->rx->ring[
+                       MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
+               netif->rx_resp_prod_copy++;
+               gop++;
+#else
+               mcl->op = __HYPERVISOR_mmuext_op;
+               mcl->args[0] = (unsigned long)mmuext;
+               mcl->args[1] = 1;
+               mcl->args[2] = 0;
+               mcl->args[3] = netif->domid;
+               mcl++;
+
+               mmuext->cmd = MMUEXT_REASSIGN_PAGE;
+               mmuext->arg1.mfn = old_mfn;
+               mmuext++;
+#endif
+               mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
+                       MMU_MACHPHYS_UPDATE;
+               mmu->val = __pa(vdata) >> PAGE_SHIFT;  
+               mmu++;
+
+               __skb_queue_tail(&rxq, skb);
 
 #ifdef DEBUG_GRANT
-        dump_packet('a', old_mfn, vdata);
-#endif
-        /* Filled the batch queue? */
-        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
-            break;
-    }
-
-    if ( mcl == rx_mcl )
-        return;
-
-    mcl->op = __HYPERVISOR_mmu_update;
-    mcl->args[0] = (unsigned long)rx_mmu;
-    mcl->args[1] = mmu - rx_mmu;
-    mcl->args[2] = 0;
-    mcl->args[3] = DOMID_SELF;
-    mcl++;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-#else
-    mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-#endif
-    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
-        BUG();
-
-    mcl = rx_mcl;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
-                                 gop - grant_rx_op)) { 
-        /* 
-        ** The other side has given us a bad grant ref, or has no headroom, 
-        ** or has gone away. Unfortunately the current grant table code 
-        ** doesn't inform us which is the case, so not much we can do. 
-        */
-        DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) %d "
-                "packets.\n", grant_rx_op[0].domid, gop - grant_rx_op); 
-    }
-    gop = grant_rx_op;
-#else
-    mmuext = rx_mmuext;
-#endif
-    while ( (skb = __skb_dequeue(&rxq)) != NULL )
-    {
-        netif   = netdev_priv(skb->dev);
-        size    = skb->tail - skb->data;
-
-        /* Rederive the machine addresses. */
-        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
-#else
-        old_mfn = mmuext[0].arg1.mfn;
-#endif
-        atomic_set(&(skb_shinfo(skb)->dataref), 1);
-        skb_shinfo(skb)->nr_frags = 0;
-        skb_shinfo(skb)->frag_list = NULL;
-
-        netif->stats.tx_bytes += size;
-        netif->stats.tx_packets++;
-
-        /* The update_va_mapping() must not fail. */
-        BUG_ON(mcl[0].result != 0);
-
-        /* Check the reassignment error code. */
-        status = NETIF_RSP_OKAY;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        if(gop->status != 0) { 
-            DPRINTK("Bad status %d from grant transfer to DOM%u\n", 
-                    gop->status, netif->domid);
-            /* XXX SMH: should free 'old_mfn' here */
-            status = NETIF_RSP_ERROR; 
-        } 
-#else
-        if ( unlikely(mcl[1].result != 0) )
-        {
-            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
-            free_mfn(old_mfn);
-            status = NETIF_RSP_ERROR;
-        }
-#endif
-        evtchn = netif->evtchn;
-        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
-        if ( make_rx_response(netif, id, status,
-                              (old_mfn << PAGE_SHIFT) | /* XXX */
-                              ((unsigned long)skb->data & ~PAGE_MASK),
-                              size, skb->proto_csum_valid) &&
-             (rx_notify[evtchn] == 0) )
-        {
-            rx_notify[evtchn] = 1;
-            notify_list[notify_nr++] = evtchn;
-        }
-
-        netif_put(netif);
-        dev_kfree_skb(skb);
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        mcl++;
-        gop++;
-#else
-        mcl += 2;
-        mmuext += 1;
-#endif
-    }
-
-    while ( notify_nr != 0 )
-    {
-        evtchn = notify_list[--notify_nr];
-        rx_notify[evtchn] = 0;
-        notify_via_evtchn(evtchn);
-    }
-
-  out: 
-    /* More work to do? */
-    if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
-        tasklet_schedule(&net_rx_tasklet);
+               dump_packet('a', old_mfn, vdata);
+#endif
+               /* Filled the batch queue? */
+               if ((mcl - rx_mcl) == ARRAY_SIZE(rx_mcl))
+                       break;
+       }
+
+       if (mcl == rx_mcl)
+               return;
+
+       mcl->op = __HYPERVISOR_mmu_update;
+       mcl->args[0] = (unsigned long)rx_mmu;
+       mcl->args[1] = mmu - rx_mmu;
+       mcl->args[2] = 0;
+       mcl->args[3] = DOMID_SELF;
+       mcl++;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+#else
+       mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+#endif
+       BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0);
+
+       mcl = rx_mcl;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
+                                    gop - grant_rx_op)) { 
+               /*
+                * The other side has given us a bad grant ref, or has no 
+                * headroom, or has gone away. Unfortunately the current grant
+                * table code doesn't inform us which is the case, so not much
+                * we can do. 
+                */
+               DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) "
+                       "%d packets.\n",
+                       grant_rx_op[0].domid, gop - grant_rx_op); 
+       }
+       gop = grant_rx_op;
+#else
+       mmuext = rx_mmuext;
+#endif
+       while ((skb = __skb_dequeue(&rxq)) != NULL) {
+               netif   = netdev_priv(skb->dev);
+               size    = skb->tail - skb->data;
+
+               /* Rederive the machine addresses. */
+               new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
+#else
+               old_mfn = mmuext[0].arg1.mfn;
+#endif
+               atomic_set(&(skb_shinfo(skb)->dataref), 1);
+               skb_shinfo(skb)->nr_frags = 0;
+               skb_shinfo(skb)->frag_list = NULL;
+
+               netif->stats.tx_bytes += size;
+               netif->stats.tx_packets++;
+
+               /* The update_va_mapping() must not fail. */
+               BUG_ON(mcl[0].result != 0);
+
+               /* Check the reassignment error code. */
+               status = NETIF_RSP_OKAY;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               if(gop->status != 0) { 
+                       DPRINTK("Bad status %d from grant transfer to DOM%u\n",
+                               gop->status, netif->domid);
+                       /* XXX SMH: should free 'old_mfn' here */
+                       status = NETIF_RSP_ERROR; 
+               } 
+#else
+               if (unlikely(mcl[1].result != 0)) {
+                       DPRINTK("Failed MMU update transferring to DOM%u\n",
+                               netif->domid);
+                       free_mfn(old_mfn);
+                       status = NETIF_RSP_ERROR;
+               }
+#endif
+               evtchn = netif->evtchn;
+               id = netif->rx->ring[
+                       MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
+               if (make_rx_response(netif, id, status,
+                                    (old_mfn << PAGE_SHIFT) | /* XXX */
+                                    ((unsigned long)skb->data & ~PAGE_MASK),
+                                    size, skb->proto_csum_valid) &&
+                   (rx_notify[evtchn] == 0)) {
+                       rx_notify[evtchn] = 1;
+                       notify_list[notify_nr++] = evtchn;
+               }
+
+               netif_put(netif);
+               dev_kfree_skb(skb);
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               mcl++;
+               gop++;
+#else
+               mcl += 2;
+               mmuext += 1;
+#endif
+       }
+
+       while (notify_nr != 0) {
+               evtchn = notify_list[--notify_nr];
+               rx_notify[evtchn] = 0;
+               notify_via_evtchn(evtchn);
+       }
+
+       /* More work to do? */
+       if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
+               tasklet_schedule(&net_rx_tasklet);
 #if 0
-    else
-        xen_network_done_notify();
+       else
+               xen_network_done_notify();
 #endif
 }
 
 static void net_alarm(unsigned long unused)
 {
-    tasklet_schedule(&net_rx_tasklet);
+       tasklet_schedule(&net_rx_tasklet);
 }
 
 struct net_device_stats *netif_be_get_stats(struct net_device *dev)
 {
-    netif_t *netif = netdev_priv(dev);
-    return &netif->stats;
+       netif_t *netif = netdev_priv(dev);
+       return &netif->stats;
 }
 
 static int __on_net_schedule_list(netif_t *netif)
 {
-    return netif->list.next != NULL;
+       return netif->list.next != NULL;
 }
 
 static void remove_from_net_schedule_list(netif_t *netif)
 {
-    spin_lock_irq(&net_schedule_list_lock);
-    if ( likely(__on_net_schedule_list(netif)) )
-    {
-        list_del(&netif->list);
-        netif->list.next = NULL;
-        netif_put(netif);
-    }
-    spin_unlock_irq(&net_schedule_list_lock);
+       spin_lock_irq(&net_schedule_list_lock);
+       if (likely(__on_net_schedule_list(netif))) {
+               list_del(&netif->list);
+               netif->list.next = NULL;
+               netif_put(netif);
+       }
+       spin_unlock_irq(&net_schedule_list_lock);
 }
 
 static void add_to_net_schedule_list_tail(netif_t *netif)
 {
-    if ( __on_net_schedule_list(netif) )
-        return;
-
-    spin_lock_irq(&net_schedule_list_lock);
-    if ( !__on_net_schedule_list(netif) && netif->active )
-    {
-        list_add_tail(&netif->list, &net_schedule_list);
-        netif_get(netif);
-    }
-    spin_unlock_irq(&net_schedule_list_lock);
+       if (__on_net_schedule_list(netif))
+               return;
+
+       spin_lock_irq(&net_schedule_list_lock);
+       if (!__on_net_schedule_list(netif) && netif->active) {
+               list_add_tail(&netif->list, &net_schedule_list);
+               netif_get(netif);
+       }
+       spin_unlock_irq(&net_schedule_list_lock);
 }
 
 void netif_schedule_work(netif_t *netif)
 {
-    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
-         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
-    {
-        add_to_net_schedule_list_tail(netif);
-        maybe_schedule_tx_action();
-    }
+       if ((netif->tx_req_cons != netif->tx->req_prod) &&
+           ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE)) {
+               add_to_net_schedule_list_tail(netif);
+               maybe_schedule_tx_action();
+       }
 }
 
 void netif_deschedule_work(netif_t *netif)
 {
-    remove_from_net_schedule_list(netif);
+       remove_from_net_schedule_list(netif);
 }
 
 
 static void tx_credit_callback(unsigned long data)
 {
-    netif_t *netif = (netif_t *)data;
-    netif->remaining_credit = netif->credit_bytes;
-    netif_schedule_work(netif);
+       netif_t *netif = (netif_t *)data;
+       netif->remaining_credit = netif->credit_bytes;
+       netif_schedule_work(netif);
 }
 
 inline static void net_tx_action_dealloc(void)
 {
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    gnttab_unmap_grant_ref_t *gop;
-#else
-    multicall_entry_t *mcl;
-#endif
-    u16 pending_idx;
-    PEND_RING_IDX dc, dp;
-    netif_t *netif;
-
-    dc = dealloc_cons;
-    dp = dealloc_prod;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    /*
-     * Free up any grants we have finished using
-     */
-    gop = tx_unmap_ops;
-    while ( dc != dp )
-    {
-        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-        gop->host_addr    = MMAP_VADDR(pending_idx);
-        gop->dev_bus_addr = 0;
-        gop->handle       = grant_tx_ref[pending_idx];
-        grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
-        gop++;
-    }
-    BUG_ON(HYPERVISOR_grant_table_op(
-               GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
-#else
-    mcl = tx_mcl;
-    while ( dc != dp )
-    {
-        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
-       MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
-                               __pte(0), 0);
-        mcl++;     
-    }
-
-    mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
-        BUG();
-
-    mcl = tx_mcl;
-#endif
-    while ( dealloc_cons != dp )
-    {
+       gnttab_unmap_grant_ref_t *gop;
+#else
+       multicall_entry_t *mcl;
+#endif
+       u16 pending_idx;
+       PEND_RING_IDX dc, dp;
+       netif_t *netif;
+
+       dc = dealloc_cons;
+       dp = dealloc_prod;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       /*
+        * Free up any grants we have finished using
+        */
+       gop = tx_unmap_ops;
+       while (dc != dp) {
+               pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
+               gop->host_addr    = MMAP_VADDR(pending_idx);
+               gop->dev_bus_addr = 0;
+               gop->handle       = grant_tx_ref[pending_idx];
+               grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
+               gop++;
+       }
+       BUG_ON(HYPERVISOR_grant_table_op(
+               GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
+#else
+       mcl = tx_mcl;
+       while (dc != dp) {
+               pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
+               MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
+                                       __pte(0), 0);
+               mcl++;     
+       }
+
+       mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
+       BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
+
+       mcl = tx_mcl;
+#endif
+       while (dealloc_cons != dp) {
 #ifndef CONFIG_XEN_NETDEV_GRANT
-        /* The update_va_mapping() must not fail. */
-        BUG_ON(mcl[0].result != 0);
-#endif
-
-        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
-
-        netif = pending_tx_info[pending_idx].netif;
-
-        make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
-                         NETIF_RSP_OKAY);
+               /* The update_va_mapping() must not fail. */
+               BUG_ON(mcl[0].result != 0);
+#endif
+
+               pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
+
+               netif = pending_tx_info[pending_idx].netif;
+
+               make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
+                                NETIF_RSP_OKAY);
         
-        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-
-        /*
-         * Scheduling checks must happen after the above response is posted.
-         * This avoids a possible race with a guest OS on another CPU if that
-         * guest is testing against 'resp_prod' when deciding whether to notify
-         * us when it queues additional packets.
-         */
-        mb();
-        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
-             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
-            add_to_net_schedule_list_tail(netif);
+               pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+
+               /*
+                * Scheduling checks must happen after the above response is
+                * posted. This avoids a possible race with a guest OS on
+                * another CPU if that guest is testing against 'resp_prod'
+                * when deciding whether to notify us when it queues additional
+                * packets.
+                */
+               mb();
+               if ((netif->tx_req_cons != netif->tx->req_prod) &&
+                   ((netif->tx_req_cons-netif->tx_resp_prod) !=
+                    NETIF_TX_RING_SIZE))
+                       add_to_net_schedule_list_tail(netif);
         
-        netif_put(netif);
+               netif_put(netif);
 
 #ifndef CONFIG_XEN_NETDEV_GRANT
-        mcl++;
-#endif
-    }
-
+               mcl++;
+#endif
+       }
 }
 
 /* Called after netfront has transmitted */
 static void net_tx_action(unsigned long unused)
 {
-    struct list_head *ent;
-    struct sk_buff *skb;
-    netif_t *netif;
-    netif_tx_request_t txreq;
-    u16 pending_idx;
-    NETIF_RING_IDX i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    gnttab_map_grant_ref_t *mop;
-#else
-    multicall_entry_t *mcl;
-#endif
-    unsigned int data_len;
-
-    if ( dealloc_cons != dealloc_prod )
-        net_tx_action_dealloc();
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    mop = tx_map_ops;
-#else
-    mcl = tx_mcl;
-#endif
-    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
-            !list_empty(&net_schedule_list) )
-    {
-        /* Get a netif from the list with work to do. */
-        ent = net_schedule_list.next;
-        netif = list_entry(ent, netif_t, list);
-        netif_get(netif);
-        remove_from_net_schedule_list(netif);
-
-        /* Work to do? */
-        i = netif->tx_req_cons;
-        if ( (i == netif->tx->req_prod) ||
-             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
-        {
-            netif_put(netif);
-            continue;
-        }
-
-        rmb(); /* Ensure that we see the request before we copy it. */
-        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
-               sizeof(txreq));
-        /* Credit-based scheduling. */
-        if ( txreq.size > netif->remaining_credit )
-        {
-            unsigned long now = jiffies;
-            unsigned long next_credit = 
-                netif->credit_timeout.expires +
-                msecs_to_jiffies(netif->credit_usec / 1000);
-
-            /* Timer could already be pending in some rare cases. */
-            if ( timer_pending(&netif->credit_timeout) )
-                break;
-
-            /* Already passed the point at which we can replenish credit? */
-            if ( time_after_eq(now, next_credit) )
-            {
-                netif->credit_timeout.expires = now;
-                netif->remaining_credit = netif->credit_bytes;
-            }
-
-            /* Still too big to send right now? Then set a timer callback. */
-            if ( txreq.size > netif->remaining_credit )
-            {
-                netif->remaining_credit = 0;
-                netif->credit_timeout.expires  = next_credit;
-                netif->credit_timeout.data     = (unsigned long)netif;
-                netif->credit_timeout.function = tx_credit_callback;
-                add_timer_on(&netif->credit_timeout, smp_processor_id());
-                break;
-            }
-        }
-        netif->remaining_credit -= txreq.size;
-
-        /*
-         * Why the barrier? It ensures that the frontend sees updated req_cons
-         * before we check for more work to schedule.
-         */
-        netif->tx->req_cons = ++netif->tx_req_cons;
-        mb();
-
-        netif_schedule_work(netif);
-
-        if ( unlikely(txreq.size < ETH_HLEN) || 
-             unlikely(txreq.size > ETH_FRAME_LEN) )
-        {
-            DPRINTK("Bad packet size: %d\n", txreq.size);
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            continue; 
-        }
-
-        /* No crossing a page boundary as the payload mustn't fragment. */
-        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) ) 
-        {
-            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
-                    txreq.addr, txreq.size, 
-                    (txreq.addr &~PAGE_MASK) + txreq.size);
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            continue;
-        }
-
-        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
-
-        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
-
-        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
-        {
-            DPRINTK("Can't allocate a skb in start_xmit.\n");
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            break;
-        }
-
-        /* Packets passed to netif_rx() must have some headroom. */
-        skb_reserve(skb, 16);
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        mop->host_addr = MMAP_VADDR(pending_idx);
-        mop->dom       = netif->domid;
-        mop->ref       = txreq.addr >> PAGE_SHIFT;
-        mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
-        mop++;
-#else
-       MULTI_update_va_mapping_otherdomain(
-           mcl, MMAP_VADDR(pending_idx),
-           pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
-           0, netif->domid);
-
-        mcl++;
-#endif
-
-        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
-        pending_tx_info[pending_idx].netif = netif;
-        *((u16 *)skb->data) = pending_idx;
-
-        __skb_queue_tail(&tx_queue, skb);
-
-        pending_cons++;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        if ( (mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops) )
-            break;
-#else
-        /* Filled the batch queue? */
-        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
-            break;
-#endif
-    }
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    if ( mop == tx_map_ops )
-        return;
-
-    BUG_ON(HYPERVISOR_grant_table_op(
-        GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
-
-    mop = tx_map_ops;
-#else
-    if ( mcl == tx_mcl )
-        return;
-
-    BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
-
-    mcl = tx_mcl;
-#endif
-    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
-    {
-        pending_idx = *((u16 *)skb->data);
-        netif       = pending_tx_info[pending_idx].netif;
-        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
-
-        /* Check the remap error code. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        if ( unlikely(mop->handle < 0) )
-        {
-            printk(KERN_ALERT "#### netback grant fails\n");
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            kfree_skb(skb);
-            mop++;
-            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-            continue;
-        }
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
-                             FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
-        grant_tx_ref[pending_idx] = mop->handle;
-#else
-        if ( unlikely(mcl[0].result != 0) )
-        {
-            DPRINTK("Bad page frame\n");
-            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
-            netif_put(netif);
-            kfree_skb(skb);
-            mcl++;
-            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
-            continue;
-        }
-
-        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
-            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
-#endif
-
-        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
-
-        __skb_put(skb, data_len);
-        memcpy(skb->data, 
-               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
-               data_len);
-        if ( data_len < txreq.size )
-        {
-            /* Append the packet payload as a fragment. */
-            skb_shinfo(skb)->frags[0].page        = 
-                virt_to_page(MMAP_VADDR(pending_idx));
-            skb_shinfo(skb)->frags[0].size        = txreq.size - data_len;
-            skb_shinfo(skb)->frags[0].page_offset = 
-                (txreq.addr + data_len) & ~PAGE_MASK;
-            skb_shinfo(skb)->nr_frags = 1;
-        }
-        else
-        {
-            /* Schedule a response immediately. */
-            netif_idx_release(pending_idx);
-        }
-
-        skb->data_len  = txreq.size - data_len;
-        skb->len      += skb->data_len;
-
-        skb->dev      = netif->dev;
-        skb->protocol = eth_type_trans(skb, skb->dev);
-
-        /* No checking needed on localhost, but remember the field is blank. */
-        skb->ip_summed        = CHECKSUM_UNNECESSARY;
-        skb->proto_csum_valid = 1;
-        skb->proto_csum_blank = txreq.csum_blank;
-
-        netif->stats.rx_bytes += txreq.size;
-        netif->stats.rx_packets++;
-
-        netif_rx(skb);
-        netif->dev->last_rx = jiffies;
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
-        mop++;
-#else
-        mcl++;
-#endif
-    }
+       struct list_head *ent;
+       struct sk_buff *skb;
+       netif_t *netif;
+       netif_tx_request_t txreq;
+       u16 pending_idx;
+       NETIF_RING_IDX i;
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       gnttab_map_grant_ref_t *mop;
+#else
+       multicall_entry_t *mcl;
+#endif
+       unsigned int data_len;
+
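+       /*
+        * Recycle any pending-ring slots that netif_idx_release() has
+        * queued for deallocation before taking on new work.
+        */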
+       if (dealloc_cons != dealloc_prod)
+               net_tx_action_dealloc();
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       mop = tx_map_ops;
+#else
+       mcl = tx_mcl;
+#endif
+       while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
+               !list_empty(&net_schedule_list)) {
+               /* Get a netif from the list with work to do. */
+               ent = net_schedule_list.next;
+               netif = list_entry(ent, netif_t, list);
+               netif_get(netif);
+               remove_from_net_schedule_list(netif);
+
+               /* Work to do? */
+               i = netif->tx_req_cons;
+               if ((i == netif->tx->req_prod) ||
+                   ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE)) {
+                       netif_put(netif);
+                       continue;
+               }
+
+               rmb(); /* Ensure that we see the request before we copy it. */
+               memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
+                      sizeof(txreq));
+               /* Credit-based scheduling. */
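+               /*
+                * Each interface is allotted credit_bytes of transmit
+                * credit per replenish interval.  If this request exceeds
+                * the remaining credit, either replenish (if the interval
+                * has passed) or defer the interface via a timer callback.
+                */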
+               if (txreq.size > netif->remaining_credit) {
+                       unsigned long now = jiffies;
+                       unsigned long next_credit = 
+                               netif->credit_timeout.expires +
+                               msecs_to_jiffies(netif->credit_usec / 1000);
+
+                       /* Timer could already be pending in rare cases. */
+                       if (timer_pending(&netif->credit_timeout))
+                               break;
+
+                       /* Passed the point where we can replenish credit? */
+                       if (time_after_eq(now, next_credit)) {
+                               netif->credit_timeout.expires = now;
+                               netif->remaining_credit = netif->credit_bytes;
+                       }
+
+                       /* Still too big to send right now? Set a callback. */
+                       if (txreq.size > netif->remaining_credit) {
+                               netif->remaining_credit = 0;
+                               netif->credit_timeout.expires  = 
+                                       next_credit;
+                               netif->credit_timeout.data     =
+                                       (unsigned long)netif;
+                               netif->credit_timeout.function =
+                                       tx_credit_callback;
+                               add_timer_on(&netif->credit_timeout,
+                                            smp_processor_id());
+                               break;
+                       }
+               }
+               netif->remaining_credit -= txreq.size;
+
+               /*
+                * Why the barrier? It ensures that the frontend sees updated
+                * req_cons before we check for more work to schedule.
+                */
+               netif->tx->req_cons = ++netif->tx_req_cons;
+               mb();
+
+               netif_schedule_work(netif);
+
+               if (unlikely(txreq.size < ETH_HLEN) || 
+                   unlikely(txreq.size > ETH_FRAME_LEN)) {
+                       DPRINTK("Bad packet size: %d\n", txreq.size);
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       continue; 
+               }
+
+               /* No crossing a page as the payload mustn't fragment. */
+               if (unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >=
+                            PAGE_SIZE)) {
+                       DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
+                               txreq.addr, txreq.size,
+                               (txreq.addr & ~PAGE_MASK) + txreq.size);
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       continue;
+               }
+
+               pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
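+               /*
+                * Copy at most PKT_PROT_LEN bytes (enough for protocol
+                * inspection) into the linear area; any remainder is
+                * attached below as a page fragment.
+                */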
+               data_len = (txreq.size > PKT_PROT_LEN) ?
+                       PKT_PROT_LEN : txreq.size;
+
+               skb = alloc_skb(data_len + 16, GFP_ATOMIC);
+               if (unlikely(skb == NULL)) {
+                       DPRINTK("Can't allocate an skb in net_tx_action.\n");
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       break;
+               }
+
+               /* Packets passed to netif_rx() must have some headroom. */
+               skb_reserve(skb, 16);
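+               /*
+                * Map the frontend's transmit page into our address
+                * space, either via a grant-table map operation or, when
+                * grant tables are not in use, via a batched cross-domain
+                * MMU update.
+                */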
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               mop->host_addr = MMAP_VADDR(pending_idx);
+               mop->dom       = netif->domid;
+               mop->ref       = txreq.addr >> PAGE_SHIFT;
+               mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
+               mop++;
+#else
+               MULTI_update_va_mapping_otherdomain(
+                       mcl, MMAP_VADDR(pending_idx),
+                       pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
+                       0, netif->domid);
+
+               mcl++;
+#endif
+
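+               /*
+                * Record the request against its pending-ring slot and
+                * stash the slot index at the head of the skb so that the
+                * completion pass below can recover it.
+                */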
+               memcpy(&pending_tx_info[pending_idx].req,
+                      &txreq, sizeof(txreq));
+               pending_tx_info[pending_idx].netif = netif;
+               *((u16 *)skb->data) = pending_idx;
+
+               __skb_queue_tail(&tx_queue, skb);
+
+               pending_cons++;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
+                       break;
+#else
+               /* Filled the batch queue? */
+               if ((mcl - tx_mcl) == ARRAY_SIZE(tx_mcl))
+                       break;
+#endif
+       }
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       if (mop == tx_map_ops)
+               return;
+
+       BUG_ON(HYPERVISOR_grant_table_op(
+               GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
+
+       mop = tx_map_ops;
+#else
+       if (mcl == tx_mcl)
+               return;
+
+       BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
+
+       mcl = tx_mcl;
+#endif
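+       /*
+        * Second pass: the batched operations have been issued, so walk
+        * the queued skbs and check each mapping's result before passing
+        * the packets up the stack.
+        */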
+       while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
+               pending_idx = *((u16 *)skb->data);
+               netif       = pending_tx_info[pending_idx].netif;
+               memcpy(&txreq, &pending_tx_info[pending_idx].req,
+                      sizeof(txreq));
+
+               /* Check the remap error code. */
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               if (unlikely(mop->handle < 0)) {
+                       printk(KERN_ALERT "#### netback grant map failed\n");
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       kfree_skb(skb);
+                       mop++;
+                       pending_ring[MASK_PEND_IDX(pending_prod++)] =
+                               pending_idx;
+                       continue;
+               }
+               phys_to_machine_mapping[
+                       __pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+                       FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
+               grant_tx_ref[pending_idx] = mop->handle;
+#else
+               if (unlikely(mcl[0].result != 0)) {
+                       DPRINTK("Bad page frame\n");
+                       make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+                       netif_put(netif);
+                       kfree_skb(skb);
+                       mcl++;
+                       pending_ring[MASK_PEND_IDX(pending_prod++)] =
+                               pending_idx;
+                       continue;
+               }
+
+               phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >>
+                                      PAGE_SHIFT] =
+                       FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
+#endif
+
+               data_len = (txreq.size > PKT_PROT_LEN) ?
+                       PKT_PROT_LEN : txreq.size;
+
+               __skb_put(skb, data_len);
+               memcpy(skb->data,
+                      (void *)(MMAP_VADDR(pending_idx) |
+                               (txreq.addr & ~PAGE_MASK)),
+                      data_len);
+               if (data_len < txreq.size) {
+                       /* Append the packet payload as a fragment. */
+                       skb_shinfo(skb)->frags[0].page        = 
+                               virt_to_page(MMAP_VADDR(pending_idx));
+                       skb_shinfo(skb)->frags[0].size        =
+                               txreq.size - data_len;
+                       skb_shinfo(skb)->frags[0].page_offset = 
+                               (txreq.addr + data_len) & ~PAGE_MASK;
+                       skb_shinfo(skb)->nr_frags = 1;
+               } else {
+                       /* Schedule a response immediately. */
+                       netif_idx_release(pending_idx);
+               }
+
+               skb->data_len  = txreq.size - data_len;
+               skb->len      += skb->data_len;
+
+               skb->dev      = netif->dev;
+               skb->protocol = eth_type_trans(skb, skb->dev);
+
+               /*
+                * No checking needed on localhost, but remember the field is
+                * blank.
+                */
+               skb->ip_summed        = CHECKSUM_UNNECESSARY;
+               skb->proto_csum_valid = 1;
+               skb->proto_csum_blank = txreq.csum_blank;
+
+               netif->stats.rx_bytes += txreq.size;
+               netif->stats.rx_packets++;
+
+               netif_rx(skb);
+               netif->dev->last_rx = jiffies;
+
+#ifdef CONFIG_XEN_NETDEV_GRANT
+               mop++;
+#else
+               mcl++;
+#endif
+       }
 }
 
 static void netif_idx_release(u16 pending_idx)
 {
-    static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
-    unsigned long flags;
-
-    spin_lock_irqsave(&_lock, flags);
-    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
-    spin_unlock_irqrestore(&_lock, flags);
-
-    tasklet_schedule(&net_tx_tasklet);
+       static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
+       unsigned long flags;
+
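+       /*
+        * This may be called from several contexts (e.g. the net_tx_action
+        * tasklet and page destructors), so the dealloc producer must be
+        * updated under a lock.
+        */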
+       spin_lock_irqsave(&_lock, flags);
+       dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
+       spin_unlock_irqrestore(&_lock, flags);
+
+       tasklet_schedule(&net_tx_tasklet);
 }
 
 static void netif_page_release(struct page *page)
 {
-    u16 pending_idx = page - virt_to_page(mmap_vstart);
-
-    /* Ready for next use. */
-    set_page_count(page, 1);
-
-    netif_idx_release(pending_idx);
+       u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+       /* Ready for next use. */
+       set_page_count(page, 1);
+
+       netif_idx_release(pending_idx);
 }
 
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 {
-    netif_t *netif = dev_id;
-    if ( tx_work_exists(netif) )
-    {
-        add_to_net_schedule_list_tail(netif);
-        maybe_schedule_tx_action();
-    }
-    return IRQ_HANDLED;
+       netif_t *netif = dev_id;
+       if (tx_work_exists(netif)) {
+               add_to_net_schedule_list_tail(netif);
+               maybe_schedule_tx_action();
+       }
+       return IRQ_HANDLED;
 }
 
 static void make_tx_response(netif_t *netif, 
                              u16      id,
                              s8       st)
 {
-    NETIF_RING_IDX i = netif->tx_resp_prod;
-    netif_tx_response_t *resp;
-
-    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
-    resp->id     = id;
-    resp->status = st;
-    wmb();
-    netif->tx->resp_prod = netif->tx_resp_prod = ++i;
-
-    mb(); /* Update producer before checking event threshold. */
-    if ( i == netif->tx->event )
-        notify_via_evtchn(netif->evtchn);
+       NETIF_RING_IDX i = netif->tx_resp_prod;
+       netif_tx_response_t *resp;
+
+       resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
+       resp->id     = id;
+       resp->status = st;
+       wmb();
+       netif->tx->resp_prod = netif->tx_resp_prod = ++i;
+
+       mb(); /* Update producer before checking event threshold. */
+       if (i == netif->tx->event)
+               notify_via_evtchn(netif->evtchn);
 }
 
 static int make_rx_response(netif_t *netif, 
@@ -882,110 +878,129 @@
                             u16      size,
                             u16      csum_valid)
 {
-    NETIF_RING_IDX i = netif->rx_resp_prod;
-    netif_rx_response_t *resp;
-
-    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
-    resp->addr       = addr;
-    resp->csum_valid = csum_valid;
-    resp->id         = id;
-    resp->status     = (s16)size;
-    if ( st < 0 )
-        resp->status = (s16)st;
-    wmb();
-    netif->rx->resp_prod = netif->rx_resp_prod = ++i;
-
-    mb(); /* Update producer before checking event threshold. */
-    return (i == netif->rx->event);
+       NETIF_RING_IDX i = netif->rx_resp_prod;
+       netif_rx_response_t *resp;
+
+       resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+       resp->addr       = addr;
+       resp->csum_valid = csum_valid;
+       resp->id         = id;
+       resp->status     = (s16)size;
+       if (st < 0)
+               resp->status = (s16)st;
+       wmb();
+       netif->rx->resp_prod = netif->rx_resp_prod = ++i;
+
+       mb(); /* Update producer before checking event threshold. */
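+       /* Tell the caller whether the event threshold was reached. */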
+       return (i == netif->rx->event);
 }
 
 static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
 {
-    struct list_head *ent;
-    netif_t *netif;
-    int i = 0;
-
-    printk(KERN_ALERT "netif_schedule_list:\n");
-    spin_lock_irq(&net_schedule_list_lock);
-
-    list_for_each ( ent, &net_schedule_list )
-    {
-        netif = list_entry(ent, netif_t, list);
-        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
-               i, netif->rx_req_cons, netif->rx_resp_prod);               
-        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
-               netif->tx_req_cons, netif->tx_resp_prod);
-        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
-               netif->rx->req_prod, netif->rx->resp_prod);
-        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
-               netif->rx->event, netif->tx->req_prod);
-        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
-               netif->tx->resp_prod, netif->tx->event);
-        i++;
-    }
-
-    spin_unlock_irq(&net_schedule_list_lock);
-    printk(KERN_ALERT " ** End of netif_schedule_list **\n");
-
-    return IRQ_HANDLED;
+       struct list_head *ent;
+       netif_t *netif;
+       int i = 0;
+
+       printk(KERN_ALERT "netif_schedule_list:\n");
+       spin_lock_irq(&net_schedule_list_lock);
+
+       list_for_each(ent, &net_schedule_list) {
+               netif = list_entry(ent, netif_t, list);
+               printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
+                      "rx_resp_prod=%08x\n",
+                      i, netif->rx_req_cons, netif->rx_resp_prod);
+               printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
+                      netif->tx_req_cons, netif->tx_resp_prod);
+               printk(KERN_ALERT "   shared(rx_req_prod=%08x "
+                      "rx_resp_prod=%08x\n",
+                      netif->rx->req_prod, netif->rx->resp_prod);
+               printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
+                      netif->rx->event, netif->tx->req_prod);
+               printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
+                      netif->tx->resp_prod, netif->tx->event);
+               i++;
+       }
+
+       spin_unlock_irq(&net_schedule_list_lock);
+       printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+
+       return IRQ_HANDLED;
 }
 
 static int __init netback_init(void)
 {
-    int i;
-    struct page *page;
-
-    if ( !(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
-         !(xen_start_info->flags & SIF_INITDOMAIN) )
-        return 0;
-
-    IPRINTK("Initialising Xen netif backend.\n");
-#ifdef CONFIG_XEN_NETDEV_GRANT
-    IPRINTK("Using grant tables.\n");
-#endif
-
-    /* We can increase reservation by this much in net_rx_action(). */
-    balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
-
-    skb_queue_head_init(&rx_queue);
-    skb_queue_head_init(&tx_queue);
-
-    init_timer(&net_timer);
-    net_timer.data = 0;
-    net_timer.function = net_alarm;
+       int i;
+       struct page *page;
+
+       if (!(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
+           !(xen_start_info->flags & SIF_INITDOMAIN))
+               return 0;
+
+       IPRINTK("Initialising Xen netif backend.\n");
+#ifdef CONFIG_XEN_NETDEV_GRANT
+       IPRINTK("Using grant tables.\n");
+#endif
+
+       /* We can increase reservation by this much in net_rx_action(). */
+       balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
+
+       skb_queue_head_init(&rx_queue);
+       skb_queue_head_init(&tx_queue);
+
+       init_timer(&net_timer);
+       net_timer.data = 0;
+       net_timer.function = net_alarm;
     
-    page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
-    BUG_ON(page == NULL);
-    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-    {
-        page = virt_to_page(MMAP_VADDR(i));
-        set_page_count(page, 1);
-        SetPageForeign(page, netif_page_release);
-    }
-
-    pending_cons = 0;
-    pending_prod = MAX_PENDING_REQS;
-    for ( i = 0; i < MAX_PENDING_REQS; i++ )
-        pending_ring[i] = i;
-
-    spin_lock_init(&net_schedule_list_lock);
-    INIT_LIST_HEAD(&net_schedule_list);
-
-    netif_xenbus_init();
-
-    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
-                      netif_be_dbg, SA_SHIRQ, 
-                      "net-be-dbg", &netif_be_dbg);
-
-    return 0;
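+       /*
+        * Reserve an empty virtual address range under which foreign
+        * transmit pages can be mapped.
+        */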
+       page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
+       BUG_ON(page == NULL);
+       mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+
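+       /*
+        * Mark each page as foreign, with netif_page_release() as its
+        * destructor, so the slot is recycled when the stack frees it.
+        */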
+       for (i = 0; i < MAX_PENDING_REQS; i++) {
+               page = virt_to_page(MMAP_VADDR(i));
+               set_page_count(page, 1);
+               SetPageForeign(page, netif_page_release);
+       }
+
+       pending_cons = 0;
+       pending_prod = MAX_PENDING_REQS;
+       for (i = 0; i < MAX_PENDING_REQS; i++)
+               pending_ring[i] = i;
+
+       spin_lock_init(&net_schedule_list_lock);
+       INIT_LIST_HEAD(&net_schedule_list);
+
+       netif_xenbus_init();
+
+       (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
+                         netif_be_dbg, SA_SHIRQ, 
+                         "net-be-dbg", &netif_be_dbg);
+
+       return 0;
 }
 
 static void netback_cleanup(void)
 {
-    BUG();
+       BUG();
 }
 
 module_init(netback_init);
 module_exit(netback_cleanup);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
diff -r 8bb3f2567b8c -r 7fbaf67a0af5 linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Fri Sep 16 13:06:49 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Fri Sep 16 13:27:01 2005
@@ -294,3 +294,13 @@
 {
        xenbus_register_backend(&netback);
 }
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
