WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

[Xen-devel] [3/11] [NET] front: Stop using rx->id

To: Keir Fraser <Keir.Fraser@xxxxxxxxxxxx>, Xen Development Mailing List <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [3/11] [NET] front: Stop using rx->id
From: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Date: Sat, 8 Jul 2006 00:18:14 +1000
Delivery-date: Fri, 07 Jul 2006 07:19:26 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
In-reply-to: <20060707141634.GA12031@xxxxxxxxxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20060707141634.GA12031@xxxxxxxxxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.9i
Hi:

[NET] front: Stop using rx->id

With the current protocol for transferring packets from dom0 to domU, the
rx->id field is useless because it can be derived from the rx request ring
ID.  In particular,

        rx->id = (ring_id & (NET_RX_RING_SIZE - 1)) + 1;

This formula works because the rx response to each request always occupies
the same slot that the request arrived in.  This in turn is a consequence
of the fact that each packet only occupies one slot.

Another important reason this works for dom0=>domU but not domU=>dom0
is that in the dom0=>domU case the resource associated with rx->id is
freed immediately, while in the domU=>dom0 case the resource is held
until the skb is liberated by dom0.

Using this formula we can essentially remove rx->id from the protocol,
freeing up space that could instead be used by things like TSO.  The
only constraint is that the backend must obey the rule that each id must
only be used in the response that occupies the same slot as the request.

The actual field of rx->id is still maintained for compatibility with
older backends.

Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>

Cheers,
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <herbert@xxxxxxxxxxxxxxxxxxx>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
--
diff -r e90f22eee9a1 -r 2ec3e15164f7 
linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Fri Jul 07 
23:35:46 2006 +1000
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Fri Jul 07 
23:35:52 2006 +1000
@@ -99,17 +99,17 @@ struct netfront_info {
        struct timer_list rx_refill_timer;
 
        /*
-        * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
-        * array is an index into a chain of free entries.
+        * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
+        * is an index into a chain of free entries.
         */
        struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
-       struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
+       struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 
 #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
        grant_ref_t gref_tx_head;
        grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
        grant_ref_t gref_rx_head;
-       grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1];
+       grant_ref_t grant_rx_ref[NET_TX_RING_SIZE];
 
        struct xenbus_device *xbdev;
        int tx_ring_ref;
@@ -122,7 +122,7 @@ struct netfront_info {
 };
 
 /*
- * Access macros for acquiring freeing slots in {tx,rx}_skbs[].
+ * Access macros for acquiring freeing slots in tx_skbs[].
  */
 
 static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
@@ -136,6 +136,29 @@ static inline unsigned short get_id_from
        unsigned int id = (unsigned int)(unsigned long)list[0];
        list[0] = list[id];
        return id;
+}
+
+static inline int xennet_rxidx(RING_IDX idx)
+{
+       return idx & (NET_RX_RING_SIZE - 1);
+}
+
+static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
+                                               RING_IDX ri)
+{
+       int i = xennet_rxidx(ri);
+       struct sk_buff *skb = np->rx_skbs[i];
+       np->rx_skbs[i] = NULL;
+       return skb;
+}
+
+static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
+                                           RING_IDX ri)
+{
+       int i = xennet_rxidx(ri);
+       grant_ref_t ref = np->grant_rx_ref[i];
+       np->grant_rx_ref[i] = GRANT_INVALID_REF;
+       return ref;
 }
 
 #define DPRINTK(fmt, args...)                          \
@@ -598,8 +621,9 @@ static void network_alloc_rx_buffers(str
 
                skb->dev = dev;
 
-               id = get_id_from_freelist(np->rx_skbs);
-
+               id = xennet_rxidx(req_prod + i);
+
+               BUG_ON(np->rx_skbs[id]);
                np->rx_skbs[id] = skb;
 
                RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
@@ -840,6 +864,19 @@ static irqreturn_t netif_int(int irq, vo
        return IRQ_HANDLED;
 }
 
+static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
+                               grant_ref_t ref)
+{
+       int new = xennet_rxidx(np->rx.req_prod_pvt);
+
+       BUG_ON(np->rx_skbs[new]);
+       np->rx_skbs[new] = skb;
+       np->grant_rx_ref[new] = ref;
+       RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
+       RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
+       np->rx.req_prod_pvt++;
+       RING_PUSH_REQUESTS(&np->rx);
+}
 
 static int netif_poll(struct net_device *dev, int *pbudget)
 {
@@ -874,12 +911,15 @@ static int netif_poll(struct net_device 
             i++, work_done++) {
                rx = RING_GET_RESPONSE(&np->rx, i);
 
+               skb = xennet_get_rx_skb(np, i);
+               ref = xennet_get_rx_ref(np, i);
+
                /*
                 * This definitely indicates a bug, either in this driver or in
                 * the backend driver. In future this should flag the bad
                 * situation to the system controller to reboot the backed.
                 */
-               if ((ref = np->grant_rx_ref[rx->id]) == GRANT_INVALID_REF) {
+               if (ref == GRANT_INVALID_REF) {
                        WPRINTK("Bad rx response id %d.\n", rx->id);
                        work_done--;
                        continue;
@@ -890,21 +930,12 @@ static int netif_poll(struct net_device 
                        if (net_ratelimit())
                                WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
                                        rx->id, rx->status);
-                       RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
-                               rx->id;
-                       RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref =
-                               ref;
-                       np->rx.req_prod_pvt++;
-                       RING_PUSH_REQUESTS(&np->rx);
+                       xennet_move_rx_slot(np, skb, ref);
                        work_done--;
                        continue;
                }
 
                gnttab_release_grant_reference(&np->gref_rx_head, ref);
-               np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
-
-               skb = np->rx_skbs[rx->id];
-               add_id_to_freelist(np->rx_skbs, rx->id);
 
                /* NB. We handle skb overflow later. */
                skb->data = skb->head + rx->offset;
@@ -1128,6 +1159,7 @@ static void network_connect(struct net_d
        struct netfront_info *np = netdev_priv(dev);
        int i, requeue_idx;
        struct sk_buff *skb;
+       grant_ref_t ref;
 
        xennet_set_features(dev);
 
@@ -1158,15 +1190,25 @@ static void network_connect(struct net_d
        }
 
        /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-       for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) {
-               if ((unsigned long)np->rx_skbs[i] < PAGE_OFFSET)
-                       continue;
+       for (i = 0; i < NET_RX_RING_SIZE; i++) {
+               if (!np->rx_skbs[i])
+                       break;
                gnttab_grant_foreign_transfer_ref(
                        np->grant_rx_ref[i], np->xbdev->otherend_id,
                        __pa(np->rx_skbs[i]->data) >> PAGE_SHIFT);
-               RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
-                       np->grant_rx_ref[i];
-               RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
+               RING_GET_REQUEST(&np->rx, i)->gref = np->grant_rx_ref[i];
+               RING_GET_REQUEST(&np->rx, i)->id = i;
+       }
+       for (requeue_idx = i++; i < NET_RX_RING_SIZE; i++) {
+               if (!np->rx_skbs[i])
+                       continue;
+               skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
+               ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
+               gnttab_grant_foreign_transfer_ref(
+                       ref, np->xbdev->otherend_id,
+                       __pa(skb->data) >> PAGE_SHIFT);
+               RING_GET_REQUEST(&np->rx, requeue_idx)->gref = ref;
+               RING_GET_REQUEST(&np->rx, requeue_idx)->id = requeue_idx;
                requeue_idx++;
        }
 
@@ -1391,11 +1433,6 @@ static struct net_device * __devinit cre
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
        }
 
-       for (i = 0; i <= NET_RX_RING_SIZE; i++) {
-               np->rx_skbs[i] = (void *)((unsigned long) i+1);
-               np->grant_rx_ref[i] = GRANT_INVALID_REF;
-       }
-
        /* A grant for every tx ring slot */
        if (gnttab_alloc_grant_references(TX_MAX_TARGET,
                                          &np->gref_tx_head) < 0) {

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel