WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/

[Xen-changelog] Merged.

# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID dfb8362648982b51d80ef589eafd74688c09ccc3
# Parent  970cf1fff5f2b55f4f646950f5c7a747a6db92f9
# Parent  fbf58585008a9a8d4bec2d47ac64166c77d5bf7f
Merged.

diff -r 970cf1fff5f2 -r dfb836264898 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c      Mon Jan  9 11:24:02 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c      Mon Jan  9 14:43:46 2006
@@ -109,7 +109,7 @@
                                size_t len, loff_t *ppos)
 {
        struct xenbus_dev_data *u = filp->private_data;
-       struct xenbus_dev_transaction *trans;
+       struct xenbus_dev_transaction *trans = NULL;
        void *reply;
 
        if ((len + u->len) > sizeof(u->u.buffer))
@@ -134,14 +134,19 @@
        case XS_MKDIR:
        case XS_RM:
        case XS_SET_PERMS:
-               reply = xenbus_dev_request_and_reply(&u->u.msg);
-               if (IS_ERR(reply))
-                       return PTR_ERR(reply);
-
                if (u->u.msg.type == XS_TRANSACTION_START) {
                        trans = kmalloc(sizeof(*trans), GFP_KERNEL);
                        if (!trans)
                                return -ENOMEM;
+               }
+
+               reply = xenbus_dev_request_and_reply(&u->u.msg);
+               if (IS_ERR(reply)) {
+                       kfree(trans);
+                       return PTR_ERR(reply);
+               }
+
+               if (u->u.msg.type == XS_TRANSACTION_START) {
                        trans->handle = (struct xenbus_transaction *)
                                simple_strtoul(reply, NULL, 0);
                        list_add(&trans->list, &u->transactions);
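
The xenbus_dev.c hunk above moves the transaction allocation ahead of the call to
xenbus_dev_request_and_reply() and frees it when the request fails, so an allocation
failure is reported before any reply exists and a failed request no longer leaks the
freshly allocated transaction. Below is a minimal userspace sketch of that same
allocate-early, free-on-error ordering; do_request() and its NULL-on-error convention
are stand-ins for illustration only, not the xenbus API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct transaction {
    unsigned long handle;
};

/* Stand-in for a request that can fail; returns a malloc'd reply or NULL. */
static char *do_request(const char *msg)
{
    if (strcmp(msg, "fail") == 0)
        return NULL;
    return strdup("42");
}

/* Same ordering as the patched write path: allocate first, free the
 * allocation if the request fails, fill it in only on success. */
static int start_transaction(const char *msg, struct transaction **out)
{
    struct transaction *trans = malloc(sizeof(*trans));
    if (!trans)
        return -1;                      /* -ENOMEM in the kernel code */

    char *reply = do_request(msg);
    if (!reply) {
        free(trans);                    /* no leak on a failed request */
        return -1;
    }

    trans->handle = strtoul(reply, NULL, 0);
    free(reply);
    *out = trans;
    return 0;
}

int main(void)
{
    struct transaction *t;

    if (start_transaction("start", &t) == 0) {
        printf("transaction handle %lu\n", t->handle);
        free(t);
    }
    if (start_transaction("fail", &t) != 0)
        printf("request failed, transaction freed\n");
    return 0;
}
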
diff -r 970cf1fff5f2 -r dfb836264898 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Mon Jan  9 11:24:02 2006
+++ b/xen/arch/x86/domain.c     Mon Jan  9 14:43:46 2006
@@ -689,6 +689,9 @@
     struct vcpu          *p = percpu_ctxt[cpu].curr_vcpu;
     struct vcpu          *n = current;
 
+    ASSERT(p != n);
+    ASSERT(cpus_empty(n->vcpu_dirty_cpumask));
+
     if ( !is_idle_domain(p->domain) )
     {
         memcpy(&p->arch.guest_context.user_regs,
@@ -748,24 +751,31 @@
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
     unsigned int cpu = smp_processor_id();
+    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
 
     ASSERT(local_irq_is_enabled());
 
+    /* Allow at most one CPU at a time to be dirty. */
+    ASSERT(cpus_weight(dirty_mask) <= 1);
+    if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) )
+    {
+        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
+        flush_tlb_mask(dirty_mask);
+    }
+
+    local_irq_disable();
+
     set_current(next);
 
-    if ( (percpu_ctxt[cpu].curr_vcpu != next) &&
-         !is_idle_domain(next->domain) )
-    {
-        /* This may happen if next has been migrated by the scheduler. */
-        if ( unlikely(!cpus_empty(next->vcpu_dirty_cpumask)) )
-        {
-            ASSERT(!cpu_isset(cpu, next->vcpu_dirty_cpumask));
-            sync_vcpu_execstate(next);
-            ASSERT(cpus_empty(next->vcpu_dirty_cpumask));
-        }
-
-        local_irq_disable();
+    if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_domain(next->domain) )
+    {
+        local_irq_enable();
+    }
+    else
+    {
         __context_switch();
+
+        /* Re-enable interrupts before restoring state which may fault. */
         local_irq_enable();
 
         if ( VMX_DOMAIN(next) )

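The domain.c hunk reshapes context_switch() so that a remote CPU still holding next's
lazy state is flushed by IPI while interrupts are still enabled, and so that interrupts
are disabled only across the low-level switch itself and re-enabled before state that
may fault is restored. The sketch below compresses that control flow into a
self-contained program; the Xen primitives are replaced by logging stubs purely for
illustration.

#include <stdbool.h>
#include <stdio.h>

/* Logging stubs standing in for the Xen primitives named in the hunk. */
static void flush_tlb_mask_stub(void)    { puts("IPI: flush remote CPU's lazy state"); }
static void local_irq_disable_stub(void) { puts("irqs off"); }
static void local_irq_enable_stub(void)  { puts("irqs on"); }
static void low_level_switch_stub(void)  { puts("__context_switch()"); }

/* Shape of the reordered context_switch(): flush while interrupts are
 * still enabled, keep them disabled only across the switch itself, and
 * re-enable them before any state that may fault is restored. */
static void context_switch_sketch(bool remote_cpu_dirty, bool nothing_to_switch)
{
    if (remote_cpu_dirty)
        flush_tlb_mask_stub();       /* done before irqs are disabled */

    local_irq_disable_stub();

    if (nothing_to_switch) {
        local_irq_enable_stub();     /* same vcpu or idle domain */
    } else {
        low_level_switch_stub();
        local_irq_enable_stub();     /* before restoring faulting state */
    }
}

int main(void)
{
    context_switch_sketch(true, false);
    context_switch_sketch(false, true);
    return 0;
}
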
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
