# HG changeset patch
# User Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
# Date 1298905773 0
# Node ID c5c4688d5654058add8e264a21bcf698cc3f065c
# Parent 8af88ff698ff8a4a42a10c92691f225c15aa8877
Avoid possible live-lock in vcpu_migrate
If vcpu_migrate is called for two vcpus active on different cpus,
resulting in swapping the cpus, a live-lock could occur because both
instances try to take the scheduling locks of the physical cpus in
opposite order.
To avoid this problem the locks are always taken in the same order
(sorted by the address of the lock).
Signed-off-by: Juergen Gross <juergen.gross@xxxxxxxxxxxxxx>
---
diff -r 8af88ff698ff -r c5c4688d5654 xen/common/schedule.c
--- a/xen/common/schedule.c Fri Feb 25 18:43:48 2011 +0000
+++ b/xen/common/schedule.c Mon Feb 28 15:09:33 2011 +0000
@@ -393,32 +393,52 @@
static void vcpu_migrate(struct vcpu *v)
{
unsigned long flags;
- int old_cpu, new_cpu;
- int same_lock;
+ unsigned int old_cpu, new_cpu;
+ spinlock_t *old_lock, *new_lock;
- for (;;)
+ old_cpu = new_cpu = v->processor;
+ for ( ; ; )
{
- vcpu_schedule_lock_irqsave(v, flags);
+ /*
+ * If per-cpu locks for old and new cpu are different, take the one
+ * with the lower lock address first. This avoids dead- or live-locks
+ * when this code is running on both cpus at the same time.
+ * We need another iteration if the pre-calculated lock addresses
+ * are not correct any longer after evaluating old and new cpu holding
+ * the locks.
+ */
+
+ old_lock = per_cpu(schedule_data, old_cpu).schedule_lock;
+ new_lock = per_cpu(schedule_data, new_cpu).schedule_lock;
+
+ if ( old_lock == new_lock )
+ {
+ spin_lock_irqsave(old_lock, flags);
+ }
+ else if ( old_lock < new_lock )
+ {
+ spin_lock_irqsave(old_lock, flags);
+ spin_lock(new_lock);
+ }
+ else
+ {
+ spin_lock_irqsave(new_lock, flags);
+ spin_lock(old_lock);
+ }
/* Select new CPU. */
old_cpu = v->processor;
- new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
- same_lock = (per_cpu(schedule_data, new_cpu).schedule_lock ==
- per_cpu(schedule_data, old_cpu).schedule_lock);
+ if ( old_lock == per_cpu(schedule_data, old_cpu).schedule_lock )
+ {
+ new_cpu = SCHED_OP(VCPU2OP(v), pick_cpu, v);
+ if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) &&
+ cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
+ break;
+ }
- if ( same_lock )
- break;
-
- if ( !pcpu_schedule_trylock(new_cpu) )
- {
- vcpu_schedule_unlock_irqrestore(v, flags);
- continue;
- }
- if ( cpu_isset(new_cpu, v->domain->cpupool->cpu_valid) )
- break;
-
- pcpu_schedule_unlock(new_cpu);
- vcpu_schedule_unlock_irqrestore(v, flags);
+ if ( old_lock != new_lock )
+ spin_unlock(new_lock);
+ spin_unlock_irqrestore(old_lock, flags);
}
/*
@@ -429,10 +449,9 @@
if ( v->is_running ||
!test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
{
- if ( !same_lock )
- pcpu_schedule_unlock(new_cpu);
-
- vcpu_schedule_unlock_irqrestore(v, flags);
+ if ( old_lock != new_lock )
+ spin_unlock(new_lock);
+ spin_unlock_irqrestore(old_lock, flags);
return;
}
@@ -453,11 +472,9 @@
*/
v->processor = new_cpu;
- if ( !same_lock )
- pcpu_schedule_unlock(new_cpu);
-
- spin_unlock_irqrestore(
- per_cpu(schedule_data, old_cpu).schedule_lock, flags);
+ if ( old_lock != new_lock )
+ spin_unlock(new_lock);
+ spin_unlock_irqrestore(old_lock, flags);
if ( old_cpu != new_cpu )
evtchn_move_pirqs(v);
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog