# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1173283247 0
# Node ID 42b29f084c31bcc2e7a08595aac838df2cdfe1f8
# Parent fbbf1f07fefe38807c21a4b8398dd1b5341a1260
linux: Use fake carrier flag for netfront/netback rather than the real
netif_carrier_XXX() functions. This makes network bringup much faster.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
linux-2.6-xen-sparse/drivers/xen/netback/common.h | 15 ++++
linux-2.6-xen-sparse/drivers/xen/netback/interface.c | 18 +++--
linux-2.6-xen-sparse/drivers/xen/netback/netback.c | 12 +--
linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c | 4 -
linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c | 64 ++++++++++---------
5 files changed, 69 insertions(+), 44 deletions(-)
diff -r fbbf1f07fefe -r 42b29f084c31 linux-2.6-xen-sparse/drivers/xen/netback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Wed Mar 07 13:07:12 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Wed Mar 07 16:00:47 2007 +0000
@@ -99,8 +99,20 @@ typedef struct netif_st {
struct net_device *dev;
struct net_device_stats stats;
+ unsigned int carrier;
+
wait_queue_head_t waiting_to_free;
} netif_t;
+
+/*
+ * Implement our own carrier flag: the network stack's version causes delays
+ * when the carrier is re-enabled (in particular, dev_activate() may not
+ * immediately be called, which can cause packet loss; also the etherbridge
+ * can be rather lazy in activating its port).
+ */
+#define netback_carrier_on(netif) ((netif)->carrier = 1)
+#define netback_carrier_off(netif) ((netif)->carrier = 0)
+#define netback_carrier_ok(netif) ((netif)->carrier)
#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
@@ -120,7 +132,8 @@ int netif_map(netif_t *netif, unsigned l
void netif_xenbus_init(void);
-#define netif_schedulable(dev) (netif_running(dev) && netif_carrier_ok(dev))
+#define netif_schedulable(netif) \
+ (netif_running((netif)->dev) && netback_carrier_ok(netif))
void netif_schedule_work(netif_t *netif);
void netif_deschedule_work(netif_t *netif);
diff -r fbbf1f07fefe -r 42b29f084c31 linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Wed Mar 07 13:07:12 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Wed Mar 07 16:00:47 2007 +0000
@@ -66,16 +66,19 @@ static int net_open(struct net_device *d
static int net_open(struct net_device *dev)
{
netif_t *netif = netdev_priv(dev);
- if (netif_carrier_ok(dev))
+ if (netback_carrier_ok(netif)) {
__netif_up(netif);
+ netif_start_queue(dev);
+ }
return 0;
}
static int net_close(struct net_device *dev)
{
netif_t *netif = netdev_priv(dev);
- if (netif_carrier_ok(dev))
+ if (netback_carrier_ok(netif))
__netif_down(netif);
+ netif_stop_queue(dev);
return 0;
}
@@ -138,8 +141,6 @@ netif_t *netif_alloc(domid_t domid, unsi
return ERR_PTR(-ENOMEM);
}
- netif_carrier_off(dev);
-
netif = netdev_priv(dev);
memset(netif, 0, sizeof(*netif));
netif->domid = domid;
@@ -147,6 +148,8 @@ netif_t *netif_alloc(domid_t domid, unsi
atomic_set(&netif->refcnt, 1);
init_waitqueue_head(&netif->waiting_to_free);
netif->dev = dev;
+
+ netback_carrier_off(netif);
netif->credit_bytes = netif->remaining_credit = ~0UL;
netif->credit_usec = 0UL;
@@ -285,7 +288,7 @@ int netif_map(netif_t *netif, unsigned l
netif_get(netif);
rtnl_lock();
- netif_carrier_on(netif->dev);
+ netback_carrier_on(netif);
if (netif_running(netif->dev))
__netif_up(netif);
rtnl_unlock();
@@ -302,9 +305,10 @@ err_rx:
void netif_disconnect(netif_t *netif)
{
- if (netif_carrier_ok(netif->dev)) {
+ if (netback_carrier_ok(netif)) {
rtnl_lock();
- netif_carrier_off(netif->dev);
+ netback_carrier_off(netif);
+ netif_carrier_off(netif->dev); /* discard queued packets */
if (netif_running(netif->dev))
__netif_down(netif);
rtnl_unlock();
diff -r fbbf1f07fefe -r 42b29f084c31 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Wed Mar 07 13:07:12 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Wed Mar 07 16:00:47 2007 +0000
@@ -38,7 +38,7 @@
#include <xen/balloon.h>
#include <xen/interface/memory.h>
-/*#define NETBE_DEBUG_INTERRUPT*/
+/*#define NETBE_DEBUG_INTERRUPT*/
/* extra field used in struct page */
#define netif_page_index(pg) (*(long *)&(pg)->mapping)
@@ -234,7 +234,7 @@ static void tx_queue_callback(unsigned l
static void tx_queue_callback(unsigned long data)
{
netif_t *netif = (netif_t *)data;
- if (netif_schedulable(netif->dev))
+ if (netif_schedulable(netif))
netif_wake_queue(netif->dev);
}
@@ -245,7 +245,7 @@ int netif_be_start_xmit(struct sk_buff *
BUG_ON(skb->dev != dev);
/* Drop the packet if the target domain has no receive buffers. */
- if (unlikely(!netif_schedulable(dev) || netbk_queue_full(netif)))
+ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
goto drop;
/*
@@ -684,7 +684,7 @@ static void net_rx_action(unsigned long
}
if (netif_queue_stopped(netif->dev) &&
- netif_schedulable(netif->dev) &&
+ netif_schedulable(netif) &&
!netbk_queue_full(netif))
netif_wake_queue(netif->dev);
@@ -742,7 +742,7 @@ static void add_to_net_schedule_list_tai
spin_lock_irq(&net_schedule_list_lock);
if (!__on_net_schedule_list(netif) &&
- likely(netif_schedulable(netif->dev))) {
+ likely(netif_schedulable(netif))) {
list_add_tail(&netif->list, &net_schedule_list);
netif_get(netif);
}
@@ -1340,7 +1340,7 @@ irqreturn_t netif_be_int(int irq, void *
add_to_net_schedule_list_tail(netif);
maybe_schedule_tx_action();
- if (netif_schedulable(netif->dev) && !netbk_queue_full(netif))
+ if (netif_schedulable(netif) && !netbk_queue_full(netif))
netif_wake_queue(netif->dev);
return IRQ_HANDLED;
diff -r fbbf1f07fefe -r 42b29f084c31 linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Wed Mar 07 13:07:12 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Wed Mar 07 16:00:47 2007 +0000
@@ -338,9 +338,7 @@ static void connect(struct backend_info
xenbus_switch_state(dev, XenbusStateConnected);
- /* May not get a kick from the frontend, so start the tx_queue now. */
- if (!netbk_can_queue(be->netif->dev))
- netif_wake_queue(be->netif->dev);
+ netif_wake_queue(be->netif->dev);
}
diff -r fbbf1f07fefe -r 42b29f084c31 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Mar 07 13:07:12 2007 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Mar 07 16:00:47 2007 +0000
@@ -154,6 +154,7 @@ struct netfront_info {
unsigned int irq;
unsigned int copying_receiver;
+ unsigned int carrier;
/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
@@ -191,6 +192,15 @@ struct netfront_rx_info {
struct netif_rx_response rx;
struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
+
+/*
+ * Implement our own carrier flag: the network stack's version causes delays
+ * when the carrier is re-enabled (in particular, dev_activate() may not
+ * immediately be called, which can cause packet loss).
+ */
+#define netfront_carrier_on(netif) ((netif)->carrier = 1)
+#define netfront_carrier_off(netif) ((netif)->carrier = 0)
+#define netfront_carrier_ok(netif) ((netif)->carrier)
/*
* Access macros for acquiring freeing slots in tx_skbs[].
@@ -590,6 +600,22 @@ static int send_fake_arp(struct net_devi
return dev_queue_xmit(skb);
}
+static inline int netfront_tx_slot_available(struct netfront_info *np)
+{
+ return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
+ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
+}
+
+static inline void network_maybe_wake_tx(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+
+ if (unlikely(netif_queue_stopped(dev)) &&
+ netfront_tx_slot_available(np) &&
+ likely(netif_running(dev)))
+ netif_wake_queue(dev);
+}
+
static int network_open(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
@@ -597,7 +623,7 @@ static int network_open(struct net_devic
memset(&np->stats, 0, sizeof(np->stats));
spin_lock(&np->rx_lock);
- if (netif_carrier_ok(dev)) {
+ if (netfront_carrier_ok(np)) {
network_alloc_rx_buffers(dev);
np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
@@ -605,25 +631,9 @@ static int network_open(struct net_devic
}
spin_unlock(&np->rx_lock);
- netif_start_queue(dev);
+ network_maybe_wake_tx(dev);
return 0;
-}
-
-static inline int netfront_tx_slot_available(struct netfront_info *np)
-{
- return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
- (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
-}
-
-static inline void network_maybe_wake_tx(struct net_device *dev)
-{
- struct netfront_info *np = netdev_priv(dev);
-
- if (unlikely(netif_queue_stopped(dev)) &&
- netfront_tx_slot_available(np) &&
- likely(netif_running(dev)))
- netif_wake_queue(dev);
}
static void network_tx_buf_gc(struct net_device *dev)
@@ -633,7 +643,7 @@ static void network_tx_buf_gc(struct net
struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
- BUG_ON(!netif_carrier_ok(dev));
+ BUG_ON(!netfront_carrier_ok(np));
do {
prod = np->tx.sring->rsp_prod;
@@ -703,7 +713,7 @@ static void network_alloc_rx_buffers(str
int nr_flips;
netif_rx_request_t *req;
- if (unlikely(!netif_carrier_ok(dev)))
+ if (unlikely(!netfront_carrier_ok(np)))
return;
/*
@@ -934,7 +944,7 @@ static int network_start_xmit(struct sk_
spin_lock_irq(&np->tx_lock);
- if (unlikely(!netif_carrier_ok(dev) ||
+ if (unlikely(!netfront_carrier_ok(np) ||
(frags > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(dev, skb))) {
spin_unlock_irq(&np->tx_lock);
@@ -1024,7 +1034,7 @@ static irqreturn_t netif_int(int irq, vo
spin_lock_irqsave(&np->tx_lock, flags);
- if (likely(netif_carrier_ok(dev))) {
+ if (likely(netfront_carrier_ok(np))) {
network_tx_buf_gc(dev);
/* Under tx_lock: protects access to rx shared-ring indexes. */
if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
@@ -1299,7 +1309,7 @@ static int netif_poll(struct net_device
spin_lock(&np->rx_lock);
- if (unlikely(!netif_carrier_ok(dev))) {
+ if (unlikely(!netfront_carrier_ok(np))) {
spin_unlock(&np->rx_lock);
return 0;
}
@@ -1317,7 +1327,7 @@ static int netif_poll(struct net_device
work_done = 0;
while ((i != rp) && (work_done < budget)) {
memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
- memset(extras, 0, sizeof(extras));
+ memset(extras, 0, sizeof(rinfo.extras));
err = xennet_get_responses(np, &rinfo, rp, &tmpq,
&pages_flipped);
@@ -1744,7 +1754,7 @@ static int network_connect(struct net_de
* domain a kick because we've probably just requeued some
* packets.
*/
- netif_carrier_on(dev);
+ netfront_carrier_on(np);
notify_remote_via_irq(np->irq);
network_tx_buf_gc(dev);
network_alloc_rx_buffers(dev);
@@ -1989,7 +1999,7 @@ static struct net_device * __devinit cre
np->netdev = netdev;
- netif_carrier_off(netdev);
+ netfront_carrier_off(np);
return netdev;
@@ -2023,7 +2033,7 @@ static void netif_disconnect_backend(str
/* Stop old i/f to prevent errors whilst we rebuild the state. */
spin_lock_irq(&info->tx_lock);
spin_lock(&info->rx_lock);
- netif_carrier_off(info->netdev);
+ netfront_carrier_off(info);
spin_unlock(&info->rx_lock);
spin_unlock_irq(&info->tx_lock);
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog