# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 8b74b9603d5e1b8e4f2a0b970f1425b2840b17cc
# Parent 7c1f2e20123a61341c0355f97437f916e1b54095
If netfront fails to allocate a receive skbuff, push all pending
skbuffs out onto the shared ring. If there are no skbuffs to push,
schedule a timer to try again later. This will avoid interface
lockups in low-memory conditions.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
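For context, the refill strategy described above reads roughly as follows: allocate receive buffers in a batch; if an allocation fails part-way through, push whatever was already queued onto the shared ring, and only if nothing at all could be allocated arm a timer to retry later. The sketch below is a minimal userspace model of that control flow, not netfront code; try_alloc_buffer(), push_to_ring() and retry_later() are hypothetical stand-ins for alloc_xen_skb(), the RING_PUSH_REQUESTS() path and mod_timer() in the patch.

/*
 * Illustrative userspace sketch only -- the helper names below are
 * hypothetical stand-ins, not part of netfront.
 */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_TARGET 8

/* Stand-in for alloc_xen_skb(); returns NULL when memory is tight. */
static void *try_alloc_buffer(void)
{
	return malloc(2048);
}

/* Stand-in for queueing the batch onto the shared receive ring. */
static void push_to_ring(void **batch, int n)
{
	int i;

	printf("pushing %d buffer(s) to the ring\n", n);
	for (i = 0; i < n; i++)
		free(batch[i]);
}

/* Stand-in for mod_timer(): retry the refill ~100ms later. */
static void retry_later(void)
{
	printf("no buffers available; arming refill timer\n");
}

static void refill_rx_buffers(void)
{
	void *batch[BATCH_TARGET];
	void *buf;
	int queued = 0;

	while (queued < BATCH_TARGET) {
		buf = try_alloc_buffer();
		if (buf == NULL) {
			/* Partial batch: force it out rather than stall. */
			if (queued != 0)
				break;
			/* Nothing allocated at all: try again later. */
			retry_later();
			return;
		}
		batch[queued++] = buf;
	}

	push_to_ring(batch, queued);
}

int main(void)
{
	refill_rx_buffers();
	return 0;
}

The property this models, and which the patch enforces, is that a failed allocation never leaves already-allocated buffers stranded in the local batch queue: they either reach the ring immediately or the refill is rescheduled.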
diff -r 7c1f2e20123a -r 8b74b9603d5e linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Tue Dec 27 10:40:33 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Dec 28 11:29:15 2005
@@ -116,6 +116,8 @@
#define RX_MAX_TARGET NET_RX_RING_SIZE
int rx_min_target, rx_max_target, rx_target;
struct sk_buff_head rx_batch;
+
+ struct timer_list rx_refill_timer;
/*
* {tx,rx}_skbs store outstanding skbuffs. The first entry in each
@@ -517,6 +519,13 @@
}
+static void rx_refill_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ netif_rx_schedule(dev);
+}
+
+
static void network_alloc_rx_buffers(struct net_device *dev)
{
unsigned short id;
@@ -534,7 +543,7 @@
* Allocate skbuffs greedily, even though we batch updates to the
* receive ring. This creates a less bursty demand on the memory
* allocator, so should reduce the chance of failed allocation requests
- * both for ourself and for other kernel subsystems.
+ * both for ourself and for other kernel subsystems.
*/
batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
@@ -545,8 +554,15 @@
skb = alloc_xen_skb(
((PAGE_SIZE - sizeof(struct skb_shared_info)) &
(-SKB_DATA_ALIGN(1))) - 16);
- if (skb == NULL)
- break;
+ if (skb == NULL) {
+ /* Any skbuffs queued for refill? Force them out. */
+ if (i != 0)
+ goto refill;
+ /* Could not allocate any skbuffs. Try again later. */
+ mod_timer(&np->rx_refill_timer,
+ jiffies + (HZ/10));
+ return;
+ }
__skb_queue_tail(&np->rx_batch, skb);
}
@@ -554,6 +570,12 @@
if (i < (np->rx_target/2))
return;
+ /* Adjust our fill target if we risked running out of buffers. */
+ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
+ ((np->rx_target *= 2) > np->rx_max_target))
+ np->rx_target = np->rx_max_target;
+
+ refill:
for (i = 0; ; i++) {
if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
break;
@@ -608,11 +630,6 @@
/* Above is a suitable barrier to ensure backend will see requests. */
np->rx.req_prod_pvt = req_prod + i;
RING_PUSH_REQUESTS(&np->rx);
-
- /* Adjust our fill target if we risked running out of buffers. */
- if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
- ((np->rx_target *= 2) > np->rx_max_target))
- np->rx_target = np->rx_max_target;
}
@@ -1077,6 +1094,10 @@
np->rx_min_target = RX_MIN_TARGET;
np->rx_max_target = RX_MAX_TARGET;
+ init_timer(&np->rx_refill_timer);
+ np->rx_refill_timer.data = (unsigned long)netdev;
+ np->rx_refill_timer.function = rx_refill_timeout;
+
/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
for (i = 0; i <= NET_TX_RING_SIZE; i++) {
np->tx_skbs[i] = (void *)((unsigned long) i+1);
@@ -1223,7 +1244,7 @@
/* info->backend_state = BEST_DISCONNECTED; */
spin_unlock(&info->rx_lock);
spin_unlock_irq(&info->tx_lock);
-
+
end_access(info->tx_ring_ref, info->tx.sring);
end_access(info->rx_ring_ref, info->rx.sring);
info->tx_ring_ref = GRANT_INVALID_REF;
@@ -1234,6 +1255,8 @@
if (info->irq)
unbind_from_irqhandler(info->irq, info->netdev);
info->evtchn = info->irq = 0;
+
+ del_timer_sync(&info->rx_refill_timer);
}
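The timer handling in the hunks above uses the 2.6-era timer_list API. As a reminder of how that API fits together (init_timer() plus the .data/.function fields at setup, mod_timer() to arm, del_timer_sync() on teardown), here is a self-contained module sketch; demo_timer and demo_timeout are hypothetical names, unrelated to netfront.

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical demo timer -- illustrates the API only, not netfront. */
static struct timer_list demo_timer;

/* 2.6-era timer callbacks receive the .data field as an unsigned long. */
static void demo_timeout(unsigned long data)
{
	printk(KERN_INFO "retry timer fired (data=%lu)\n", data);
}

static int __init demo_init(void)
{
	init_timer(&demo_timer);
	demo_timer.data     = 0;
	demo_timer.function = demo_timeout;

	/* Arm the timer roughly 100ms from now, as the patch does. */
	mod_timer(&demo_timer, jiffies + (HZ / 10));
	return 0;
}

static void __exit demo_exit(void)
{
	/* Wait for any running handler before the module goes away. */
	del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The patch follows the same three steps: the timer is initialised alongside the other per-device state, armed from network_alloc_rx_buffers() when no skbuff can be allocated, and cancelled with del_timer_sync() when the device is torn down.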