The current cpupools scheduler interface is inconsistent. This
patch addresses that by making the interaction for each vcpu in a
pool follow a symmetric lifecycle:

 alloc_vdata() -- allocate and set up the vcpu's scheduler data
 insert_vcpu() -- the vcpu is ready to run in this pool
 remove_vcpu() -- take the vcpu out of the pool
 free_vdata()  -- free the vcpu's scheduler data

(Previously, remove_vcpu and free_vdata were combined into a single
destroy_vcpu hook, and insert_vcpu was called only for idle vcpus.)
This also fixes a bug in credit2 caused by a misunderstanding of the
cpupools interface.
Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
diff -r 1385b15e168f -r b2725b65076e xen/common/sched_credit.c
--- a/xen/common/sched_credit.c Wed Oct 06 17:38:15 2010 +0100
+++ b/xen/common/sched_credit.c Wed Oct 20 16:49:04 2010 +0100
@@ -677,10 +677,23 @@
 static void
 csched_free_vdata(const struct scheduler *ops, void *priv)
 {
+    struct csched_vcpu *svc = priv;
+
+    BUG_ON( !list_empty(&svc->runq_elem) );
+
+    xfree(svc);
+}
+
+static void
+csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+{
     struct csched_private *prv = CSCHED_PRIV(ops);
-    struct csched_vcpu *svc = priv;
+    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_dom * const sdom = svc->sdom;
     unsigned long flags;
+
+    CSCHED_STAT_CRANK(vcpu_destroy);
 
     if ( __vcpu_on_runq(svc) )
         __runq_remove(svc);
 
@@ -691,21 +704,8 @@
 
     spin_unlock_irqrestore(&(prv->lock), flags);
 
-    xfree(svc);
-}
-
-static void
-csched_vcpu_destroy(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_dom * const sdom = svc->sdom;
-
-    CSCHED_STAT_CRANK(vcpu_destroy);
-
     BUG_ON( sdom == NULL );
     BUG_ON( !list_empty(&svc->runq_elem) );
-
-    csched_free_vdata(ops, svc);
 }
 
 static void
@@ -1561,7 +1561,7 @@
     .destroy_domain = csched_dom_destroy,
 
     .insert_vcpu = csched_vcpu_insert,
-    .destroy_vcpu = csched_vcpu_destroy,
+    .remove_vcpu = csched_vcpu_remove,
 
     .sleep = csched_vcpu_sleep,
     .wake = csched_vcpu_wake,
diff -r 1385b15e168f -r b2725b65076e xen/common/sched_credit2.c
--- a/xen/common/sched_credit2.c Wed Oct 06 17:38:15 2010 +0100
+++ b/xen/common/sched_credit2.c Wed Oct 20 16:49:04 2010 +0100
@@ -592,7 +592,18 @@
 csched_free_vdata(const struct scheduler *ops, void *priv)
 {
     struct csched_vcpu *svc = priv;
-    struct vcpu *vc = svc->vcpu;
+
+    xfree(svc);
+}
+
+static void
+csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc)
+{
+    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
+    struct csched_dom * const sdom = svc->sdom;
+
+    BUG_ON( sdom == NULL );
+    BUG_ON( !list_empty(&svc->runq_elem) );
 
     if ( ! is_idle_vcpu(vc) )
     {
@@ -610,20 +621,6 @@
 
         svc->sdom->nr_vcpus--;
     }
-
-    xfree(svc);
-}
-
-static void
-csched_vcpu_destroy(const struct scheduler *ops, struct vcpu *vc)
-{
-    struct csched_vcpu * const svc = CSCHED_VCPU(vc);
-    struct csched_dom * const sdom = svc->sdom;
-
-    BUG_ON( sdom == NULL );
-    BUG_ON( !list_empty(&svc->runq_elem) );
-
-    csched_free_vdata(ops, svc);
 }
 
 static void
@@ -1199,7 +1196,7 @@
     .destroy_domain = csched_dom_destroy,
 
     .insert_vcpu = csched_vcpu_insert,
-    .destroy_vcpu = csched_vcpu_destroy,
+    .remove_vcpu = csched_vcpu_remove,
 
     .sleep = csched_vcpu_sleep,
     .wake = csched_vcpu_wake,
diff -r 1385b15e168f -r b2725b65076e xen/common/sched_sedf.c
--- a/xen/common/sched_sedf.c Wed Oct 06 17:38:15 2010 +0100
+++ b/xen/common/sched_sedf.c Wed Oct 20 16:49:04 2010 +0100
@@ -410,11 +410,6 @@
     xfree(priv);
 }
 
-static void sedf_destroy_vcpu(const struct scheduler *ops, struct vcpu *v)
-{
-    sedf_free_vdata(ops, v->sched_priv);
-}
-
 static void *
 sedf_alloc_domdata(const struct scheduler *ops, struct domain *d)
 {
@@ -1504,8 +1499,6 @@
     .init_domain = sedf_init_domain,
     .destroy_domain = sedf_destroy_domain,
 
-    .destroy_vcpu = sedf_destroy_vcpu,
-
     .alloc_vdata = sedf_alloc_vdata,
     .free_vdata = sedf_free_vdata,
     .alloc_pdata = sedf_alloc_pdata,
diff -r 1385b15e168f -r b2725b65076e xen/common/schedule.c
--- a/xen/common/schedule.c Wed Oct 06 17:38:15 2010 +0100
+++ b/xen/common/schedule.c Wed Oct 20 16:49:04 2010 +0100
@@ -219,6 +219,8 @@
     if ( v->sched_priv == NULL )
         return 1;
 
+    SCHED_OP(VCPU2OP(v), insert_vcpu, v);
+
     return 0;
 }
 
@@ -266,7 +268,8 @@
         migrate_timer(&v->singleshot_timer, new_p);
         migrate_timer(&v->poll_timer, new_p);
 
-        SCHED_OP(VCPU2OP(v), destroy_vcpu, v);
+        SCHED_OP(VCPU2OP(v), remove_vcpu, v);
+        SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
 
         cpus_setall(v->cpu_affinity);
         v->processor = new_p;
@@ -274,6 +277,8 @@
         evtchn_move_pirqs(v);
 
         new_p = cycle_cpu(new_p, c->cpu_valid);
+
+        SCHED_OP(VCPU2OP(v), insert_vcpu, v);
     }
 
     domain_update_node_affinity(d);
@@ -295,7 +300,8 @@
     kill_timer(&v->poll_timer);
     if ( test_and_clear_bool(v->is_urgent) )
         atomic_dec(&per_cpu(schedule_data, v->processor).urgent_count);
-    SCHED_OP(VCPU2OP(v), destroy_vcpu, v);
+    SCHED_OP(VCPU2OP(v), remove_vcpu, v);
+    SCHED_OP(VCPU2OP(v), free_vdata, v->sched_priv);
 }
 
 int sched_init_domain(struct domain *d)
int sched_init_domain(struct domain *d)
diff -r 1385b15e168f -r b2725b65076e xen/include/xen/sched-if.h
--- a/xen/include/xen/sched-if.h Wed Oct 06 17:38:15 2010 +0100
+++ b/xen/include/xen/sched-if.h Wed Oct 20 16:49:04 2010 +0100
@@ -102,8 +102,9 @@
     int (*init_domain) (const struct scheduler *, struct domain *);
     void (*destroy_domain) (const struct scheduler *, struct domain *);
 
+    /* Activate / deactivate vcpus in a cpu pool */
     void (*insert_vcpu) (const struct scheduler *, struct vcpu *);
-    void (*destroy_vcpu) (const struct scheduler *, struct vcpu *);
+    void (*remove_vcpu) (const struct scheduler *, struct vcpu *);
 
     void (*sleep) (const struct scheduler *, struct vcpu *);
     void (*wake) (const struct scheduler *, struct vcpu *);