[Xen-devel] [Patch] continue_hypercall_on_cpu rework using tasklets

To: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [Patch] continue_hypercall_on_cpu rework using tasklets
From: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
Date: Wed, 30 Dec 2009 14:46:37 +0100
Organization: Fujitsu Technology Solutions

Hi,

attached is a patch that uses tasklets for continue_hypercall_on_cpu instead of
temporarily pinning the vcpu to the target physical cpu.
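
In other words, instead of pinning the issuing vcpu, the hypercall body is now
wrapped in a tasklet and pushed to the target cpu's per-cpu tasklet list via
the new tasklet_schedule_cpu(). A simplified extract of the flow implemented
in the patch below (not a drop-in snippet; error handling and the nesting case
are omitted):

    /* Wrap the continuation in a tasklet and hand it to the target cpu.
     * continue_hypercall_on_cpu_tasklet() runs info->func(info->data) there
     * and then wakes the calling vcpu again. */
    tasklet_init(&info->tasklet,
                 (void(*)(unsigned long))continue_hypercall_on_cpu_tasklet,
                 (unsigned long)info);
    tasklet_schedule_cpu(&info->tasklet, cpu); /* queue on target cpu's list */
    vcpu_sleep_nosync(v);                      /* put the calling vcpu to sleep */
    raise_softirq(SCHEDULE_SOFTIRQ);           /* reschedule; the schedule_tail
                                                  helper later writes the real
                                                  return value into regs->eax */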

This is intended as a base for cpupools, as Keir requested getting rid of the
"borrow cpu" stuff in my original solution.

Tested on x86_64 via a small test hypercall that calls continue_hypercall_on_cpu
with different target cpus.
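
For reference, a minimal sketch of what such a test hypercall could look like
(the do_cpu_test() entry point and its argument are made up for illustration;
only continue_hypercall_on_cpu() itself comes from the patched code):

    /* Runs on the target physical cpu; its return value ends up as the
     * guest-visible result of the hypercall. */
    static long test_func(void *data)
    {
        printk("continue_hypercall_on_cpu test: now on cpu %u\n",
               smp_processor_id());
        return (long)smp_processor_id();
    }

    /* Hypothetical test hypercall: continue execution on the given cpu. */
    static long do_cpu_test(unsigned int cpu)
    {
        if ( cpu >= NR_CPUS || !cpu_online(cpu) )
            return -EINVAL;
        /* Returns 0 immediately when cpu differs from the current cpu; the
         * real result is filled into the guest registers later by the
         * schedule_tail helper. */
        return continue_hypercall_on_cpu(cpu, test_func, NULL);
    }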

Keir, is this solution going in the direction you wanted?


Juergen

-- 
Juergen Gross                 Principal Developer Operating Systems
TSP ES&S SWE OS6                       Telephone: +49 (0) 89 3222 2967
Fujitsu Technology Solutions               e-mail: juergen.gross@xxxxxxxxxxxxxx
Domagkstr. 28                           Internet: ts.fujitsu.com
D-80807 Muenchen                 Company details: ts.fujitsu.com/imprint.html
# HG changeset patch
# User juergen.gross@xxxxxxxxxxxxxx
# Date 1262180329 -3600
# Node ID 1aa6f84167e2b4bcfed265d775bc3ac72ce321ed
# Parent  3f654b88e201a1341786a0e8725c25f40c1162b7

Signed-off-by: juergen.gross@xxxxxxxxxxxxxx

continue_hypercall_on_cpu rework using tasklets

diff -r 3f654b88e201 -r 1aa6f84167e2 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Tue Dec 29 15:11:47 2009 +0000
+++ b/xen/arch/x86/domain.c     Wed Dec 30 14:38:49 2009 +0100
@@ -1506,42 +1506,71 @@
 }
 
 struct migrate_info {
+    struct tasklet tasklet;
     long (*func)(void *data);
     void *data;
     void (*saved_schedule_tail)(struct vcpu *);
-    cpumask_t saved_affinity;
     unsigned int nest;
+    int ret;
+    struct vcpu *v;
+    volatile int ready;
 };
+
+static DEFINE_PER_CPU(struct migrate_info *, mig_info);
 
 static void continue_hypercall_on_cpu_helper(struct vcpu *v)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct migrate_info *info = v->arch.continue_info;
-    cpumask_t mask = info->saved_affinity;
     void (*saved_schedule_tail)(struct vcpu *) = info->saved_schedule_tail;
 
-    regs->eax = info->func(info->data);
+    while ( !info->ready )
+        cpu_relax();
+
+    regs->eax = info->ret;
 
     if ( info->nest-- == 0 )
     {
+        tasklet_kill(&info->tasklet);
         xfree(info);
         v->arch.schedule_tail = saved_schedule_tail;
         v->arch.continue_info = NULL;
-        vcpu_unlock_affinity(v, &mask);
     }
 
     (*saved_schedule_tail)(v);
+}
+
+static void continue_hypercall_on_cpu_tasklet(struct migrate_info *info)
+{
+    this_cpu(mig_info) = info;
+
+    while ( !vcpu_runnable(info->v) && info->v->is_running )
+        cpu_relax();
+
+    info->ret = info->func(info->data);
+
+    if ( info->nest == 0 )
+    {
+        info->ready = 1;
+        vcpu_wake(info->v);
+    }
+
+    this_cpu(mig_info) = NULL;
+
+    return;
 }
 
 int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
 {
     struct vcpu *v = current;
     struct migrate_info *info;
-    cpumask_t mask = cpumask_of_cpu(cpu);
-    int rc;
 
     if ( cpu == smp_processor_id() )
         return func(data);
+
+    info = this_cpu(mig_info);
+    if ( info != NULL )
+        v = info->v;
 
     info = v->arch.continue_info;
     if ( info == NULL )
@@ -1550,16 +1579,12 @@
         if ( info == NULL )
             return -ENOMEM;
 
-        rc = vcpu_lock_affinity(v, &mask);
-        if ( rc )
-        {
-            xfree(info);
-            return rc;
-        }
-
         info->saved_schedule_tail = v->arch.schedule_tail;
-        info->saved_affinity = mask;
         info->nest = 0;
+        info->v = v;
+        tasklet_init(&info->tasklet,
+                     (void(*)(unsigned long))continue_hypercall_on_cpu_tasklet,
+                     (unsigned long)info);
 
         v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
         v->arch.continue_info = info;
@@ -1567,17 +1592,18 @@
     else
     {
         BUG_ON(info->nest != 0);
-        rc = vcpu_locked_change_affinity(v, &mask);
-        if ( rc )
-            return rc;
         info->nest++;
     }
 
     info->func = func;
     info->data = data;
+    info->ready = 0;
+
+    tasklet_schedule_cpu(&info->tasklet, cpu);
+    vcpu_sleep_nosync(v);
+    raise_softirq(SCHEDULE_SOFTIRQ);
 
     /* Dummy return value will be overwritten by new schedule_tail. */
-    BUG_ON(!test_bit(SCHEDULE_SOFTIRQ, &softirq_pending(smp_processor_id())));
     return 0;
 }
 
diff -r 3f654b88e201 -r 1aa6f84167e2 xen/common/schedule.c
--- a/xen/common/schedule.c     Tue Dec 29 15:11:47 2009 +0000
+++ b/xen/common/schedule.c     Wed Dec 30 14:38:49 2009 +0100
@@ -367,26 +367,17 @@
     }
 }
 
-static int __vcpu_set_affinity(
-    struct vcpu *v, cpumask_t *affinity,
-    bool_t old_lock_status, bool_t new_lock_status)
+int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
 {
     cpumask_t online_affinity, old_affinity;
 
+    if ( v->domain->is_pinned )
+        return -EINVAL;
     cpus_and(online_affinity, *affinity, cpu_online_map);
     if ( cpus_empty(online_affinity) )
         return -EINVAL;
 
     vcpu_schedule_lock_irq(v);
-
-    if ( v->affinity_locked != old_lock_status )
-    {
-        BUG_ON(!v->affinity_locked);
-        vcpu_schedule_unlock_irq(v);
-        return -EBUSY;
-    }
-
-    v->affinity_locked = new_lock_status;
 
     old_affinity = v->cpu_affinity;
     v->cpu_affinity = *affinity;
@@ -403,36 +394,6 @@
     }
 
     return 0;
-}
-
-int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    if ( v->domain->is_pinned )
-        return -EINVAL;
-    return __vcpu_set_affinity(v, affinity, 0, 0);
-}
-
-int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    return __vcpu_set_affinity(v, affinity, 0, 1);
-}
-
-int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    return __vcpu_set_affinity(v, affinity, 1, 1);
-}
-
-void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity)
-{
-    cpumask_t online_affinity;
-
-    /* Do not fail if no CPU in old affinity mask is online. */
-    cpus_and(online_affinity, *affinity, cpu_online_map);
-    if ( cpus_empty(online_affinity) )
-        *affinity = cpu_online_map;
-
-    if ( __vcpu_set_affinity(v, affinity, 1, 0) != 0 )
-        BUG();
 }
 
 /* Block the currently-executing domain until a pertinent event occurs. */
diff -r 3f654b88e201 -r 1aa6f84167e2 xen/common/softirq.c
--- a/xen/common/softirq.c      Tue Dec 29 15:11:47 2009 +0000
+++ b/xen/common/softirq.c      Wed Dec 30 14:38:49 2009 +0100
@@ -88,9 +88,11 @@
 }
 
 static LIST_HEAD(tasklet_list);
+static DEFINE_PER_CPU(struct list_head, tasklet_list_pcpu);
 static DEFINE_SPINLOCK(tasklet_lock);
 
-void tasklet_schedule(struct tasklet *t)
+static void tasklet_schedule_list(struct tasklet *t, struct list_head *tlist,
+    int cpu)
 {
     unsigned long flags;
 
@@ -101,28 +103,44 @@
         if ( !t->is_scheduled && !t->is_running )
         {
             BUG_ON(!list_empty(&t->list));
-            list_add_tail(&t->list, &tasklet_list);
+            list_add_tail(&t->list, tlist);
         }
         t->is_scheduled = 1;
-        raise_softirq(TASKLET_SOFTIRQ);
+        if ( cpu == smp_processor_id() )
+            raise_softirq(TASKLET_SOFTIRQ);
+        else
+            cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
     }
 
     spin_unlock_irqrestore(&tasklet_lock, flags);
 }
 
+void tasklet_schedule(struct tasklet *t)
+{
+    tasklet_schedule_list(t, &tasklet_list, smp_processor_id());
+}
+
+void tasklet_schedule_cpu(struct tasklet *t, int cpu)
+{
+    tasklet_schedule_list(t, &per_cpu(tasklet_list_pcpu, cpu), cpu);
+}
+
 static void tasklet_action(void)
 {
+    struct list_head *tlist;
     struct tasklet *t;
 
     spin_lock_irq(&tasklet_lock);
 
-    if ( list_empty(&tasklet_list) )
+    tlist = ( list_empty(&this_cpu(tasklet_list_pcpu)) ) ? &tasklet_list :
+        &this_cpu(tasklet_list_pcpu);
+    if ( list_empty(tlist) )
     {
         spin_unlock_irq(&tasklet_lock);
         return;
     }
 
-    t = list_entry(tasklet_list.next, struct tasklet, list);
+    t = list_entry(tlist->next, struct tasklet, list);
     list_del_init(&t->list);
 
     BUG_ON(t->is_dead || t->is_running || !t->is_scheduled);
@@ -138,14 +156,15 @@
     if ( t->is_scheduled )
     {
         BUG_ON(t->is_dead || !list_empty(&t->list));
-        list_add_tail(&t->list, &tasklet_list);
+        list_add_tail(&t->list, tlist);
     }
 
     /*
      * If there is more work to do then reschedule. We don't grab more work
      * immediately as we want to allow other softirq work to happen first.
      */
-    if ( !list_empty(&tasklet_list) )
+    if ( !list_empty(&tasklet_list) ||
+        !list_empty(&this_cpu(tasklet_list_pcpu)) )
         raise_softirq(TASKLET_SOFTIRQ);
 
     spin_unlock_irq(&tasklet_lock);
@@ -186,6 +205,12 @@
 
 void __init softirq_init(void)
 {
+    int i;
+
+    for_each_possible_cpu ( i )
+    {
+        INIT_LIST_HEAD(&per_cpu(tasklet_list_pcpu, i));
+    }
     open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 }
 
diff -r 3f654b88e201 -r 1aa6f84167e2 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Tue Dec 29 15:11:47 2009 +0000
+++ b/xen/include/xen/sched.h   Wed Dec 30 14:38:49 2009 +0100
@@ -130,8 +130,6 @@
     bool_t           defer_shutdown;
     /* VCPU is paused following shutdown request (d->is_shutting_down)? */
     bool_t           paused_for_shutdown;
-    /* VCPU affinity is temporarily locked from controller changes? */
-    bool_t           affinity_locked;
 
     /*
      * > 0: a single port is being polled;
@@ -579,9 +577,6 @@
 void vcpu_force_reschedule(struct vcpu *v);
 void cpu_disable_scheduler(void);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);
-int vcpu_lock_affinity(struct vcpu *v, cpumask_t *affinity);
-int vcpu_locked_change_affinity(struct vcpu *v, cpumask_t *affinity);
-void vcpu_unlock_affinity(struct vcpu *v, cpumask_t *affinity);
 
 void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
 uint64_t get_cpu_idle_time(unsigned int cpu);
diff -r 3f654b88e201 -r 1aa6f84167e2 xen/include/xen/softirq.h
--- a/xen/include/xen/softirq.h Tue Dec 29 15:11:47 2009 +0000
+++ b/xen/include/xen/softirq.h Wed Dec 30 14:38:49 2009 +0100
@@ -58,6 +58,7 @@
     struct tasklet name = { LIST_HEAD_INIT(name.list), 0, 0, 0, func, data }
 
 void tasklet_schedule(struct tasklet *t);
+void tasklet_schedule_cpu(struct tasklet *t, int cpu);
 void tasklet_kill(struct tasklet *t);
 void tasklet_init(
     struct tasklet *t, void (*func)(unsigned long), unsigned long data);
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel