xen-changelog

[Xen-changelog] Upgrade FreeBSD sparse tree from testing.bk to unstable.bk

To: xen-changelog@xxxxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Upgrade FreeBSD sparse tree from testing.bk to unstable.bk
From: BitKeeper Bot <riel@xxxxxxxxxxx>
Date: Mon, 21 Mar 2005 09:00:17 +0000
Delivery-date: Mon, 21 Mar 2005 16:04:53 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-archive: <http://sourceforge.net/mailarchive/forum.php?forum=xen-changelog>
List-help: <mailto:xen-changelog-request@lists.sourceforge.net?subject=help>
List-id: <xen-changelog.lists.sourceforge.net>
List-post: <mailto:xen-changelog@lists.sourceforge.net>
List-subscribe: <https://lists.sourceforge.net/lists/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.sourceforge.net?subject=subscribe>
List-unsubscribe: <https://lists.sourceforge.net/lists/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.sourceforge.net?subject=unsubscribe>
Reply-to: Xen Development List <xen-devel@xxxxxxxxxxxxxxxxxxxxx>
Sender: xen-changelog-admin@xxxxxxxxxxxxxxxxxxxxx
ChangeSet 1.1335, 2005/03/21 09:00:17+00:00, iap10@xxxxxxxxxxxxxxxxxxx

        Upgrade FreeBSD sparse tree from testing.bk to unstable.bk
        Signed-off-by: ian.pratt@xxxxxxxxxxxx



 i386-xen/clock.c           |    2 
 i386-xen/ctrl_if.c         |  171 +++++++++++++++++++----------
 i386-xen/evtchn.c          |   17 +-
 i386-xen/locore.s          |    2 
 i386-xen/machdep.c         |  107 ++++++++++--------
 i386-xen/pmap.c            |    3 
 i386-xen/vm_machdep.c      |    8 -
 i386-xen/xen_machdep.c     |  264 +++++++++++++++++++++------------------------
 include/evtchn.h           |   22 +++
 include/pmap.h             |    4 
 include/vmparam.h          |    2 
 include/xen-os.h           |  117 +++++++++++--------
 xen/blkfront/xb_blkfront.c |  136 +++++++++++------------
 xen/misc/evtchn_dev.c      |   18 +--
 xen/netfront/xn_netfront.c |   13 --
 15 files changed, 492 insertions(+), 394 deletions(-)
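
For orientation before the diff: the ctrl_if.c changes below replace the driver's private CONTROL_RING_IDX counters (ctrl_if_tx_resp_cons, ctrl_if_rx_req_cons, TX_FULL) with the generic ring macros (RING_FULL, RING_GET_REQUEST, RING_GET_RESPONSE, RING_PUSH_REQUESTS). The following is a minimal, self-contained C sketch of the consumer-side pattern those macros encapsulate: snapshot the shared producer index, issue a read barrier, consume entries up to the snapshot, then publish the consumer index after a full barrier. The demo_sring/demo_ring types and the barrier stand-ins are invented for illustration only and are not part of this patch.

/*
 * Stand-alone sketch (illustration only, not part of the patch) of the
 * shared-ring consumer pattern used by the generic ring macros.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE   8                      /* must be a power of two */
#define MASK_IDX(i) ((i) & (RING_SIZE - 1))

/* Stand-ins for the rmb()/wmb()/smp_mb() barriers used in the patch. */
#define rmb()    __sync_synchronize()
#define wmb()    __sync_synchronize()
#define smp_mb() __sync_synchronize()

struct demo_sring {                        /* shared with the producer */
    volatile uint32_t rsp_prod;
    int rsp[RING_SIZE];
};

struct demo_ring {                         /* consumer's private view */
    uint32_t rsp_cons;
    struct demo_sring *sring;
};

static void consume_responses(struct demo_ring *r)
{
    uint32_t i  = r->rsp_cons;
    uint32_t rp = r->sring->rsp_prod;
    rmb();                          /* see all responses up to 'rp' */

    for ( ; i != rp; i++ )
        printf("response %d\n", r->sring->rsp[MASK_IDX(i)]);

    smp_mb();                       /* finish reading before freeing slots */
    r->rsp_cons = i;                /* producer may now reuse the slots */
}

int main(void)
{
    struct demo_sring s = { .rsp_prod = 0 };
    struct demo_ring  r = { .rsp_cons = 0, .sring = &s };

    s.rsp[MASK_IDX(0)] = 42;        /* producer side, normally another domain */
    s.rsp[MASK_IDX(1)] = 43;
    wmb();                          /* write payload before publishing index */
    s.rsp_prod = 2;

    consume_responses(&r);
    return 0;
}
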


diff -Nru a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/clock.c b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/clock.c
--- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/clock.c  2005-03-21 11:04:06 -05:00
+++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/clock.c  2005-03-21 11:04:06 -05:00
@@ -105,6 +105,8 @@
 #define TIMER_FREQ   1193182
 #endif
 u_int  timer_freq = TIMER_FREQ;
+struct mtx clock_lock;
+
 
 static const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
 
diff -Nru a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/ctrl_if.c b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/ctrl_if.c
--- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/ctrl_if.c        2005-03-21 11:04:05 -05:00
+++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/ctrl_if.c        2005-03-21 11:04:05 -05:00
@@ -39,6 +39,18 @@
 #include <machine/evtchn.h>
 
 /*
+ * Extra ring macros to sync a consumer index up to the public producer index. 
+ * Generally UNSAFE, but we use it for recovery and shutdown in some cases.
+ */
+#define RING_DROP_PENDING_REQUESTS(_r)                                  \
+    do {                                                                \
+        (_r)->req_cons = (_r)->sring->req_prod;                         \
+    } while (0)
+#define RING_DROP_PENDING_RESPONSES(_r)                                 \
+    do {                                                                \
+        (_r)->rsp_cons = (_r)->sring->rsp_prod;                         \
+    } while (0)
+/*
  * Only used by initial domain which must create its own control-interface
  * event channel. This value is picked up by the user-space domain controller
  * via an ioctl.
@@ -51,8 +63,8 @@
 static int *      ctrl_if_wchan = &ctrl_if_evtchn;
 
 
-static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
-static CONTROL_RING_IDX ctrl_if_rx_req_cons;
+static ctrl_front_ring_t ctrl_if_tx_ring;
+static ctrl_back_ring_t  ctrl_if_rx_ring;
 
 /* Incoming message requests. */
     /* Primary message type -> message handler. */
@@ -85,7 +97,7 @@
 TASKQUEUE_DEFINE(ctrl_if_txB, NULL, NULL, {});
 struct taskqueue **taskqueue_ctrl_if_tx[2] = { &taskqueue_ctrl_if_txA,
                                               &taskqueue_ctrl_if_txB };
-int ctrl_if_idx;
+static int ctrl_if_idx = 0;
 
 static struct task ctrl_if_rx_tasklet;
 static struct task ctrl_if_tx_tasklet;
@@ -95,8 +107,6 @@
 
 
 #define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
-#define TX_FULL(_c)   \
-    (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
 
 static void 
 ctrl_if_notify_controller(void)
@@ -114,13 +124,17 @@
 static void 
 __ctrl_if_tx_tasklet(void *context __unused, int pending __unused)
 {
-    control_if_t *ctrl_if = get_ctrl_if();
     ctrl_msg_t   *msg;
-    int           was_full = TX_FULL(ctrl_if);
+    int           was_full = RING_FULL(&ctrl_if_tx_ring);
+    RING_IDX      i, rp;
+
+    i  = ctrl_if_tx_ring.rsp_cons;
+    rp = ctrl_if_tx_ring.sring->rsp_prod;
+    rmb(); /* Ensure we see all requests up to 'rp'. */
 
-    while ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+    for ( ; i != rp; i++ )
     {
-        msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
+        msg = RING_GET_RESPONSE(&ctrl_if_tx_ring, i);
 
         /* Execute the callback handler, if one was specified. */
         if ( msg->id != 0xFF )
@@ -131,77 +145,102 @@
             ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
         }
 
-        /*
-         * Step over the message in the ring /after/ finishing reading it. As 
-         * soon as the index is updated then the message may get blown away.
-         */
-        smp_mb();
-        ctrl_if_tx_resp_cons++;
     }
 
-    if ( was_full && !TX_FULL(ctrl_if) )
+    /*
+     * Step over the message in the ring /after/ finishing reading it. As 
+     * soon as the index is updated then the message may get blown away.
+     */
+    smp_mb();
+    ctrl_if_tx_ring.rsp_cons = i;
+
+    if ( was_full && !RING_FULL(&ctrl_if_tx_ring) )
     {
         wakeup(ctrl_if_wchan);
 
        /* bump idx so future enqueues will occur on the next taskq
         * process any currently pending tasks
         */
-       ctrl_if_idx++;          
+       ctrl_if_idx++;
         taskqueue_run(*taskqueue_ctrl_if_tx[(ctrl_if_idx-1) & 1]);
     }
+
 }
 
 static void 
 __ctrl_if_rxmsg_deferred_task(void *context __unused, int pending __unused)
 {
     ctrl_msg_t *msg;
+    CONTROL_RING_IDX dp;
 
-    while ( ctrl_if_rxmsg_deferred_cons != ctrl_if_rxmsg_deferred_prod )
+    dp = ctrl_if_rxmsg_deferred_prod;
+    rmb(); /* Ensure we see all deferred requests up to 'dp'. */
+    
+    while ( ctrl_if_rxmsg_deferred_cons != dp )
     {
         msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
             ctrl_if_rxmsg_deferred_cons++)];
         (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
     }
+    
 }
 
 static void 
 __ctrl_if_rx_tasklet(void *context __unused, int pending __unused)
 {
-    control_if_t *ctrl_if = get_ctrl_if();
     ctrl_msg_t    msg, *pmsg;
+    CONTROL_RING_IDX dp;
+    RING_IDX rp, i;
+
+    i  = ctrl_if_rx_ring.req_cons;
+    rp = ctrl_if_rx_ring.sring->req_prod;
+    dp = ctrl_if_rxmsg_deferred_prod;
 
-    while ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+    rmb(); /* Ensure we see all requests up to 'rp'. */
+    
+    for ( ; i != rp; i++) 
     {
-        pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
+        pmsg = RING_GET_REQUEST(&ctrl_if_rx_ring, i);
         memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
+       
+       if ( msg.length > sizeof(msg.msg))
+           msg.length = sizeof(msg.msg);
         if ( msg.length != 0 )
             memcpy(msg.msg, pmsg->msg, msg.length);
         if ( test_bit(msg.type, &ctrl_if_rxmsg_blocking_context) )
         {
-            pmsg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
-                ctrl_if_rxmsg_deferred_prod++)];
-            memcpy(pmsg, &msg, offsetof(ctrl_msg_t, msg) + msg.length);
-            taskqueue_enqueue(taskqueue_thread, &ctrl_if_rxmsg_deferred_task);
+            memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)], 
+                   &msg, offsetof(ctrl_msg_t, msg) + msg.length);
         }
         else
         {
             (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
         }
     }
+    ctrl_if_rx_ring.req_cons = i;
+
+    if ( dp != ctrl_if_rxmsg_deferred_prod )
+    {
+        wmb();
+        ctrl_if_rxmsg_deferred_prod = dp;
+        taskqueue_enqueue(taskqueue_thread, &ctrl_if_rxmsg_deferred_task);
+    }
+
 }
 
 static void 
 ctrl_if_interrupt(void *ctrl_sc)
 /* (int irq, void *dev_id, struct pt_regs *regs) */
 {
-    control_if_t *ctrl_if = get_ctrl_if();
 
-    if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+    
+    if ( RING_HAS_UNCONSUMED_RESPONSES(&ctrl_if_tx_ring) )
        taskqueue_enqueue(taskqueue_swi, &ctrl_if_tx_tasklet);
     
 
-    if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+    if ( RING_HAS_UNCONSUMED_REQUESTS(&ctrl_if_rx_ring) )
        taskqueue_enqueue(taskqueue_swi, &ctrl_if_rx_tasklet);
+    
 }
 
 int 
@@ -210,13 +249,13 @@
     ctrl_msg_handler_t hnd,
     unsigned long id)
 {
-    control_if_t *ctrl_if = get_ctrl_if();
     unsigned long flags;
+    ctrl_msg_t   *dmsg;
     int           i;
 
     mtx_lock_irqsave(&ctrl_if_lock, flags);
 
-    if ( TX_FULL(ctrl_if) )
+    if ( RING_FULL(&ctrl_if_tx_ring) )
     {
         mtx_unlock_irqrestore(&ctrl_if_lock, flags);
         return EAGAIN;
@@ -232,10 +271,11 @@
         msg->id = i;
     }
 
-    memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)], 
-           msg, sizeof(*msg));
-    wmb(); /* Write the message before letting the controller peek at it. */
-    ctrl_if->tx_req_prod++;
+    dmsg = RING_GET_REQUEST(&ctrl_if_tx_ring, 
+            ctrl_if_tx_ring.req_prod_pvt);
+    memcpy(dmsg, msg, sizeof(*msg));
+    ctrl_if_tx_ring.req_prod_pvt++;
+    RING_PUSH_REQUESTS(&ctrl_if_tx_ring);
 
     mtx_unlock_irqrestore(&ctrl_if_lock, flags);
 
@@ -252,34 +292,35 @@
     long wait_state)
 {
     int rc, sst = 0;
-
+    
     /* Fast path. */
-    if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != EAGAIN )
-        return rc;
-
-


