To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] I updated the vcpu_to_cpu string creation to include a field separator,
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Wed, 06 Jul 2005 18:50:09 -0400
Delivery-date: Wed, 06 Jul 2005 22:50:31 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User iap10@xxxxxxxxxxxxxxxxxxxxx
# Node ID 88c2d410979f572a074c14e3423c9e1de63e57bb
# Parent  32fb371cc283f4e146f2a23241a2287510fb78b2



I updated the vcpu_to_cpu string creation to include a field separator,
which gets rid of the -1 -> # hack and works for cpus > 9.
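
To make the format change concrete, here is a minimal Python sketch (not part of the patch) of the '|'-separated encoding that the xend and xm hunks below produce and consume; the example vcpu-to-cpu values are made up for illustration.

    # Hypothetical vcpu_to_cpu list: vcpu0 on cpu0, vcpu1 on cpu12, vcpu2 offline (-1).
    vcpu_to_cpu = [0, 12, -1]

    # xend side (XendDomainInfo.py hunk): join with '|' and trim the trailing separator.
    s = ''.join(map(lambda x: str(x) + '|', vcpu_to_cpu))[:-1]
    print(s)                               # -> 0|12|-1

    # xm side (main.py hunk): split on '|'; multi-digit cpus and -1 survive intact,
    # so the old replace('-1', '#') hack is no longer needed.
    print([int(c) for c in s.split('|')])  # -> [0, 12, -1]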

I ran into some issues with stale vcpu_to_cpu lists when running the
hotplug subprogram.  I would take a vcpu offline and then issue the
command to bring it back, but the vcpu_to_cpu list would not have changed
to indicate that the vcpu had actually gone down.  If I issued an
'xm list -v' (which always showed the correct mapping), then subsequent
hotplug commands would see the state change and fire off the hotplug
request.  I don't think that skipping the event when the state is
unchanged saves much work, so I took the state check out and now just
send the hotplug event directly.

> Also the whole hotplug stuff is still missing interrupt re-routing
> when a vcpu is taken down.  To do this, we need an evtchn operation to
> change the vcpu affinity of a port by changing notify_vcpu_id.

I don't fully understand all of the mappings that are happening, so this
part of the patch might be way off.  In any case, I've added a new
evtchn op to set the notify_vcpu_id field of a channel.  I updated the
HOTPLUG_CPU code to use the new routines when bringing cpus up and down.
When taking down a cpu, I route its IPI irq channels to CPU 0, and when
the cpu comes back up, the channels are re-routed to the awakened CPU.
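
For reference, a minimal sketch (not part of the patch) of how a guest kernel could invoke the new EVTCHNOP_rebind operation to move one already-bound port to another vcpu; the structure and field names come from the hunks below, the helper name is hypothetical, and locking and error handling are pared down to the bare minimum.

    /* Hypothetical helper: re-route notifications for 'port' to 'vcpu'
     * using the EVTCHNOP_rebind operation added in this patch. */
    static void move_port_to_vcpu(int port, int vcpu)
    {
        evtchn_op_t op;

        op.cmd           = EVTCHNOP_rebind;
        op.u.rebind.port = port;   /* event channel port to re-route   */
        op.u.rebind.vcpu = vcpu;   /* vcpu that should now be notified */

        if (HYPERVISOR_event_channel_op(&op) != 0)
            printk(KERN_INFO "Failed to rebind port %d to VCPU%d\n", port, vcpu);
    }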

From: Ryan Harper <ryanh@xxxxxxxxxx>
Signed-off-by: ian@xxxxxxxxxxxxx

diff -r 32fb371cc283 -r 88c2d410979f xen/common/event_channel.c
--- a/xen/common/event_channel.c        Wed Jul  6 18:57:54 2005
+++ b/xen/common/event_channel.c        Wed Jul  6 22:23:18 2005
@@ -579,6 +579,29 @@
     return rc;
 }
 
+static long evtchn_rebind(evtchn_rebind_t *bind) 
+{
+    struct domain *d    = current->domain;
+    int            port = bind->port;
+    int            vcpu = bind->vcpu;
+    struct evtchn *chn;
+    long             rc = 0;
+
+    spin_lock(&d->evtchn_lock);
+
+    if ( !port_is_valid(d, port) )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    chn = evtchn_from_port(d, port);
+    chn->notify_vcpu_id = vcpu;
+
+ out:
+    spin_unlock(&d->evtchn_lock);
+    return rc;
+}
 
 long do_event_channel_op(evtchn_op_t *uop)
 {
@@ -633,6 +656,12 @@
 
     case EVTCHNOP_status:
         rc = evtchn_status(&op.u.status);
+        if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
+            rc = -EFAULT;
+        break;
+
+    case EVTCHNOP_rebind:
+        rc = evtchn_rebind(&op.u.rebind);
         if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) )
             rc = -EFAULT;
         break;
diff -r 32fb371cc283 -r 88c2d410979f linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c  Wed Jul  6 18:57:54 2005
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c  Wed Jul  6 22:23:18 2005
@@ -271,6 +271,38 @@
     return irq;
 }
 
+void rebind_evtchn_from_ipi(int cpu, int newcpu, int ipi)
+{
+    evtchn_op_t op;
+    int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
+
+    spin_lock(&irq_mapping_update_lock);
+
+    op.cmd          = EVTCHNOP_rebind;
+    op.u.rebind.port = evtchn;
+    op.u.rebind.vcpu = newcpu;
+    if ( HYPERVISOR_event_channel_op(&op) != 0 )
+       printk(KERN_INFO "Failed to rebind IPI%d to CPU%d\n",ipi,newcpu);
+
+    spin_unlock(&irq_mapping_update_lock);
+}
+
+void rebind_evtchn_from_irq(int cpu, int newcpu, int irq)
+{
+    evtchn_op_t op;
+    int evtchn = irq_to_evtchn[irq];
+
+    spin_lock(&irq_mapping_update_lock);
+
+    op.cmd          = EVTCHNOP_rebind;
+    op.u.rebind.port = evtchn;
+    op.u.rebind.vcpu = newcpu;
+    if ( HYPERVISOR_event_channel_op(&op) != 0 )
+       printk(KERN_INFO "Failed to rebind IRQ%d to CPU%d\n",irq,newcpu);
+
+    spin_unlock(&irq_mapping_update_lock);
+}
+
 void unbind_ipi_on_cpu_from_irq(int cpu, int ipi)
 {
     evtchn_op_t op;
diff -r 32fb371cc283 -r 88c2d410979f tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Wed Jul  6 18:57:54 2005
+++ b/tools/python/xen/xend/XendDomainInfo.py   Wed Jul  6 22:23:18 2005
@@ -423,8 +423,10 @@
             sxpr.append(['cpu_time', self.info['cpu_time']/1e9])    
             sxpr.append(['vcpus', self.info['vcpus']])
             sxpr.append(['cpumap', self.info['cpumap']])
-            sxpr.append(['vcpu_to_cpu', ''.join(map(lambda x: str(x),
-                        self.info['vcpu_to_cpu'][0:self.info['vcpus']]))])
+            # build a string, using '|' to separate items, show only up
+            # to the number of vcpus in the domain, and trim the trailing '|'
+            sxpr.append(['vcpu_to_cpu', ''.join(map(lambda x: str(x)+'|',
+                        self.info['vcpu_to_cpu'][0:self.info['vcpus']]))[:-1]])
             
         if self.start_time:
             up_time =  time.time() - self.start_time  
diff -r 32fb371cc283 -r 88c2d410979f xen/include/public/event_channel.h
--- a/xen/include/public/event_channel.h        Wed Jul  6 18:57:54 2005
+++ b/xen/include/public/event_channel.h        Wed Jul  6 22:23:18 2005
@@ -158,6 +158,13 @@
     } u;
 } evtchn_status_t;
 
+#define EVTCHNOP_rebind        8
+typedef struct {
+    /* IN parameters. */
+    u32 port;                         /*  0 */
+    u32 vcpu;                         /*  4 */
+} evtchn_rebind_t; /* 8 bytes */
+
 typedef struct evtchn_op {
     u32 cmd; /* EVTCHNOP_* */
     union {
@@ -169,6 +176,7 @@
         evtchn_close_t            close;
         evtchn_send_t             send;
         evtchn_status_t           status;
+        evtchn_rebind_t           rebind;
     } u;
 } evtchn_op_t;
 
diff -r 32fb371cc283 -r 88c2d410979f linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c    Wed Jul  6 18:57:54 2005
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c    Wed Jul  6 22:23:18 2005
@@ -103,6 +103,11 @@
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 #endif
 
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static char resched_name[NR_CPUS][15];
+static char callfunc_name[NR_CPUS][15];
+
 #if 0
 /*
  * Currently trivial. Write the real->protected mode
@@ -1328,6 +1333,10 @@
        while (!cpu_online(cpu))
                cpu_relax();
 
+   /* re-route bound IRQs from CPU 0 back to cpu */
+   rebind_evtchn_from_irq(0, cpu,  per_cpu(resched_irq, cpu));
+   rebind_evtchn_from_irq(0, cpu, per_cpu(callfunc_irq, cpu));
+
        fixup_irqs(cpu_online_map);
        /* counter the disable in fixup_irqs() */
        local_irq_enable();
@@ -1357,6 +1366,11 @@
 
        cpu_clear(cpu, map);
        fixup_irqs(map);
+
+   /* re-route IRQs from dead vcpu to another */
+   rebind_evtchn_from_irq(cpu, 0,  per_cpu(resched_irq, cpu));
+   rebind_evtchn_from_irq(cpu, 0, per_cpu(callfunc_irq, cpu));
+
        /* It's now safe to remove this processor from the online map */
        cpu_clear(cpu, cpu_online_map);
 
@@ -1514,11 +1528,6 @@
 extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
 extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static char resched_name[NR_CPUS][15];
-static char callfunc_name[NR_CPUS][15];
-
 void __init smp_intr_init(void)
 {
        int cpu = smp_processor_id();
diff -r 32fb371cc283 -r 88c2d410979f tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Wed Jul  6 18:57:54 2005
+++ b/tools/python/xen/xm/main.py       Wed Jul  6 22:23:18 2005
@@ -410,8 +410,7 @@
         print 'Name              Id  VCPU  CPU  CPUMAP'
         for dom in doms:
             info = server.xend_domain(dom)
-            # XXX this is quite broken for cpu's > 9
-            vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', '?').replace('-1','#')
+            vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', '-1').split('|')
             cpumap = sxp.child_value(info, 'cpumap', [])
             mask = ((int(sxp.child_value(info, 'vcpus', '0')))**2) - 1
             count = 0
@@ -420,10 +419,7 @@
                 d['name']   = sxp.child_value(info, 'name', '??')
                 d['dom']    = int(sxp.child_value(info, 'id', '-1'))
                 d['vcpu']   = int(count)
-                if cpu == "#":
-                    d['cpu']    = int("-1")
-                else:
-                    d['cpu']    = int(cpu)
+                d['cpu']    = int(cpu)
                 d['cpumap'] = int(cpumap[count])&mask
                 count = count + 1
                 print ("%(name)-16s %(dom)3d  %(vcpu)4d  %(cpu)3d  
0x%(cpumap)x" % d)
@@ -593,15 +589,7 @@
         state = int(args[3])
         dom = server.xend_domain(name)
         id = sxp.child_value(dom, 'id')
-        vcpu_to_cpu = sxp.child_value(dom, 'vcpu_to_cpu', '-1')
-        # only send state change if states differ 
-        try:
-            # (down going up) or (up going down)
-            if (vcpu_to_cpu[vcpu] == "-1" and state == 1) or \
-               (vcpu_to_cpu[vcpu] != "-1" and state == 0):
-                server.xend_domain_vcpu_hotplug(id, vcpu, state)
-        except IndexError:
-            print "Invalid VCPU(%d)"%(vcpu)
+        server.xend_domain_vcpu_hotplug(id, vcpu, state)
 
 xm.prog(ProgVcpuhotplug)
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
